From bf4b27827494e3dc33b1e4333dfe147a36a486b3 Mon Sep 17 00:00:00 2001
From: larkee <31196561+larkee@users.noreply.github.com>
Date: Wed, 11 Nov 2020 11:39:46 +1100
Subject: [PATCH] feat!: migrate to v2.0.0 (#147)

* chore: remove old generated files
* chore: remove type files
* refactor: move handwritten files to google/cloud/spanner
* chore: update synth.py
* chore: regen library via microgenerator
* fix: update imports to use new files
* fix: update _helper.py
* update backup.py
* fix: update batch.py
* fix: update param_types.py
* fix: update pool.py
* test: update test_keyset.py
* test: update test__opentelemetry_tracing.py
* fix: update client.py
* test: update test_client.py
* fix: update database.py
* test: update test_database.py
* fix: update commit call missing kwarg
* test: update test_session.py
* fix: update streamed.py
* test: update TestStreamedResultSet tests in streamed.py
* fix: update instance.py
* test: update test_instance.py
* fix: add _parse_value method to _helper.py
* test: update test/_helper.py
* fix: update transaction.py and test_transaction.py
* fix: remove creating list_value_pb
* fix: update snapshot.py and test_snapshot.py
* fix: update setup.py
* fix: use struct pb for partition_query
* fix: support merging RepeatedComposite objects
* fix: replace dict with Statements
* fix: replace dict with Statements
* lint: format code
* fix: update synth.py
* chore: regen with gapic-generator v0.33.4
* test: increase test coverage
* fix: use backup pb instead of dict
* fix: update api calls to use supported kwargs
* fix: update api calls to use supported kwargs and remove unused function
* test: update system tests
* test: add tests for _parse_value function
* test: update empty list Value to correctly reflect the format
* refactor: use _parse_value in _parse_value_pb
* refactor: remove unneeded wrapper classes
* fix: use default retry (see golang) as ExecuteStreamingSql does not define retry settings
* refactor: remove unneeded wrapper class
* fix: use pb for params kwargs
* test: increase coverage
* test: correctly assert UNKNOWN error
* chore: remove unneeded replacements and regen with gapic-generator v0.33.6
* fix: update kwarg to type_
* refactor: remove unused imports
* refactor: remove unused imports
* fix: update kwarg to all_
* fix: update kwarg to type_
* fix: update kwarg and attribute to type_
* fix: update kwarg to type_
* test: fix test name
* style: format code
* fix: update kwarg to type_
* fix: update api calls
* test: update param types to use param_types types
* test: update numeric tests
* test: remove unused variable
* refactor: remove unused import
* docs: update doc references
* test: increase test coverage
* chore: test with and without opentelemetry for full test coverage
* chore: regen samples README
* fix: update emulator Github action
* docs: manually fix typo that breaks docs
* docs: remove unsupported markdown and point to link with removed info
* docs: fix broken hyperlink
* chore: add replacement for docs formatting issue
* chore: regen library (via synth)
* chore: exclude noxfile.py from being generated
* refactor: move handwritten files back into google/cloud/spanner_v1
* style: fix lint
* fix: update sample to use correct type
* fix: create alias for proto types
* fix: update imports
* test: update test to use proto-plus structures
* ci: update python version for emulator tests
* fix: update backup pagination example
* test: revert test_keyset for coverage
* test: fix expected ranges
* fix: ignore google/cloud/spanner_v1/__init__.py for regen
* chore: regen (via synth)
* docs: revert import changes
* refactor: address comments
* docs: add UPGRADING guide
* refactor: revert imports
* feat: remove deprecated arguments
* docs: update guide to mention deprecated arguments
* test: lower required coverage to 99%
* test: remove deprecated options
* style: fix lint error
* Update UPGRADING.md

Co-authored-by: skuruppu
Co-authored-by: larkee
---
 .../__init__.py => .github/snippet-bot.yml | 0 .../integration-tests-against-emulator.yaml | 4 +- .kokoro/docker/docs/Dockerfile | 2 +- .kokoro/docs/common.cfg | 2 +- .kokoro/populate-secrets.sh | 43 + .kokoro/release/common.cfg | 50 +- .kokoro/samples/python3.6/common.cfg | 6 + .kokoro/samples/python3.7/common.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 6 + .kokoro/test-samples.sh | 8 +- .kokoro/trampoline.sh | 15 +- CODE_OF_CONDUCT.md | 123 +- CONTRIBUTING.rst | 19 - MANIFEST.in | 1 - UPGRADING.md | 318 ++ docs/advanced-session-pool-topics.rst | 10 +- docs/api-reference.rst | 12 +- docs/conf.py | 5 +- docs/gapic/v1/admin_database_api.rst | 6 - docs/gapic/v1/admin_database_types.rst | 6 - docs/gapic/v1/admin_instance_api.rst | 6 - docs/gapic/v1/admin_instance_types.rst | 6 - docs/gapic/v1/api.rst | 6 - docs/gapic/v1/transactions.rst | 241 - docs/gapic/v1/types.rst | 6 - docs/instance-usage.rst | 34 +- docs/spanner_admin_database_v1/services.rst | 6 + docs/spanner_admin_database_v1/types.rst | 5 + docs/spanner_admin_instance_v1/services.rst | 6 + docs/spanner_admin_instance_v1/types.rst | 5 + docs/spanner_v1/services.rst | 6 + docs/spanner_v1/types.rst | 5 + docs/transaction-usage.rst | 4 +- google/cloud/spanner.py | 33 +- .../spanner_admin_database_v1/__init__.py | 82 +- .../gapic/database_admin_client.py | 1923 ------- .../gapic/database_admin_client_config.py | 147 - .../spanner_admin_database_v1/gapic/enums.py | 74 - .../database_admin_grpc_transport.py | 410 -- .../proto/__init__.py | 0 .../proto/backup_pb2.py | 1407 ----- .../proto/backup_pb2_grpc.py | 3 - .../proto/common_pb2.py | 148 - .../proto/common_pb2_grpc.py | 3 - .../proto/spanner_database_admin_pb2.py | 2145 ------- .../proto/spanner_database_admin_pb2_grpc.py | 895 --- .../cloud/spanner_admin_database_v1/py.typed | 2 + .../services/__init__.py | 16 + .../services/database_admin/__init__.py | 24 + .../services/database_admin/async_client.py | 1925 +++++++ .../services/database_admin/client.py | 2047 +++++++ .../services/database_admin/pagers.py | 540 ++ .../database_admin/transports/__init__.py | 36 + .../database_admin/transports/base.py | 473 ++ .../database_admin/transports/grpc.py | 817 +++ .../database_admin/transports/grpc_asyncio.py | 831 +++ .../cloud/spanner_admin_database_v1/types.py | 63 - .../types/__init__.py | 83 + .../spanner_admin_database_v1/types/backup.py | 480 ++ .../spanner_admin_database_v1/types/common.py | 51 + .../types/spanner_database_admin.py | 562 ++ .../spanner_admin_instance_v1/__init__.py | 50 +- .../gapic/__init__.py | 0 .../spanner_admin_instance_v1/gapic/enums.py | 76 - .../gapic/instance_admin_client.py | 1223 ---- .../gapic/instance_admin_client_config.py | 112 - .../gapic/transports/__init__.py | 0 .../instance_admin_grpc_transport.py | 340 -- .../proto/__init__.py | 0 .../proto/spanner_instance_admin_pb2.py | 1896 ------- .../proto/spanner_instance_admin_pb2_grpc.py | 640 --- .../cloud/spanner_admin_instance_v1/py.typed | 2 + .../services/__init__.py | 16 + .../services/instance_admin/__init__.py | 24 +
.../services/instance_admin/async_client.py | 1282 +++++ .../services/instance_admin/client.py | 1427 +++++ .../services/instance_admin/pagers.py | 282 + .../instance_admin/transports/__init__.py | 36 + .../instance_admin/transports/base.py | 322 ++ .../instance_admin/transports/grpc.py | 651 +++ .../instance_admin/transports/grpc_asyncio.py | 663 +++ .../cloud/spanner_admin_instance_v1/types.py | 66 - .../types/__init__.py | 51 + .../types/spanner_instance_admin.py | 482 ++ google/cloud/spanner_v1/__init__.py | 89 +- google/cloud/spanner_v1/_helpers.py | 95 +- .../spanner_v1/_opentelemetry_tracing.py | 6 +- google/cloud/spanner_v1/backup.py | 49 +- google/cloud/spanner_v1/batch.py | 11 +- google/cloud/spanner_v1/client.py | 142 +- google/cloud/spanner_v1/database.py | 110 +- google/cloud/spanner_v1/gapic/__init__.py | 0 google/cloud/spanner_v1/gapic/enums.py | 129 - .../cloud/spanner_v1/gapic/spanner_client.py | 1913 ------- .../spanner_v1/gapic/spanner_client_config.py | 137 - .../spanner_v1/gapic/transports/__init__.py | 0 .../gapic/transports/spanner.grpc.config | 88 - .../transports/spanner_grpc_transport.py | 415 -- google/cloud/spanner_v1/instance.py | 132 +- google/cloud/spanner_v1/keyset.py | 10 +- google/cloud/spanner_v1/param_types.py | 38 +- google/cloud/spanner_v1/pool.py | 10 +- google/cloud/spanner_v1/proto/__init__.py | 0 google/cloud/spanner_v1/proto/keys_pb2.py | 381 -- .../cloud/spanner_v1/proto/keys_pb2_grpc.py | 3 - google/cloud/spanner_v1/proto/mutation_pb2.py | 448 -- .../spanner_v1/proto/mutation_pb2_grpc.py | 3 - .../cloud/spanner_v1/proto/query_plan_pb2.py | 623 -- .../spanner_v1/proto/query_plan_pb2_grpc.py | 3 - .../cloud/spanner_v1/proto/result_set_pb2.py | 633 --- .../spanner_v1/proto/result_set_pb2_grpc.py | 3 - .../proto/spanner_database_admin.proto | 302 - .../proto/spanner_instance_admin.proto | 475 -- google/cloud/spanner_v1/proto/spanner_pb2.py | 3437 ----------- .../spanner_v1/proto/spanner_pb2_grpc.py | 819 --- .../cloud/spanner_v1/proto/transaction.proto | 278 +- .../cloud/spanner_v1/proto/transaction_pb2.py | 1028 ---- .../spanner_v1/proto/transaction_pb2_grpc.py | 3 - google/cloud/spanner_v1/proto/type.proto | 2 +- google/cloud/spanner_v1/proto/type_pb2.py | 418 -- .../cloud/spanner_v1/proto/type_pb2_grpc.py | 3 - google/cloud/spanner_v1/py.typed | 2 + google/cloud/spanner_v1/services/__init__.py | 16 + .../spanner_v1/services/spanner/__init__.py | 24 + .../services/spanner/async_client.py | 1402 +++++ .../spanner_v1/services/spanner/client.py | 1550 +++++ .../spanner_v1/services/spanner/pagers.py | 148 + .../services/spanner/transports/__init__.py | 36 + .../services/spanner/transports/base.py | 420 ++ .../services/spanner/transports/grpc.py | 741 +++ .../spanner/transports/grpc_asyncio.py | 760 +++ google/cloud/spanner_v1/session.py | 29 +- google/cloud/spanner_v1/snapshot.py | 104 +- google/cloud/spanner_v1/streamed.py | 86 +- google/cloud/spanner_v1/transaction.py | 69 +- google/cloud/spanner_v1/types.py | 67 - google/cloud/spanner_v1/types/__init__.py | 103 + google/cloud/spanner_v1/types/keys.py | 210 + google/cloud/spanner_v1/types/mutation.py | 145 + google/cloud/spanner_v1/types/query_plan.py | 165 + google/cloud/spanner_v1/types/result_set.py | 263 + google/cloud/spanner_v1/types/spanner.py | 948 ++++ google/cloud/spanner_v1/types/transaction.py | 231 + google/cloud/spanner_v1/types/type.py | 113 + noxfile.py | 82 +- samples/samples/README.rst | 118 +- samples/samples/backup_sample.py | 7 +- samples/samples/noxfile.py | 31 +- 
samples/samples/quickstart_test.py | 2 +- samples/samples/snippets.py | 6 +- scripts/decrypt-secrets.sh | 15 +- ...ixup_spanner_admin_database_v1_keywords.py | 194 + ...ixup_spanner_admin_instance_v1_keywords.py | 187 + scripts/fixup_spanner_v1_keywords.py | 192 + setup.py | 16 +- .../__init__.py => stale_outputs_checked | 0 synth.metadata | 16 +- synth.py | 115 +- tests/_helpers.py | 2 +- tests/system/test_system.py | 83 +- .../spanner_admin_database_v1/__init__.py | 1 + .../test_database_admin.py | 5050 +++++++++++++++++ .../spanner_admin_instance_v1/__init__.py | 1 + .../test_instance_admin.py | 3380 +++++++++++ tests/unit/gapic/spanner_v1/__init__.py | 1 + tests/unit/gapic/spanner_v1/test_spanner.py | 3462 +++++++++++ .../gapic/v1/test_database_admin_client_v1.py | 842 --- .../gapic/v1/test_instance_admin_client_v1.py | 538 -- tests/unit/gapic/v1/test_spanner_client_v1.py | 722 --- tests/unit/test__helpers.py | 313 +- tests/unit/test__opentelemetry_tracing.py | 33 +- tests/unit/test_backup.py | 162 +- tests/unit/test_batch.py | 45 +- tests/unit/test_client.py | 143 +- tests/unit/test_database.py | 238 +- tests/unit/test_instance.py | 330 +- tests/unit/test_keyset.py | 56 +- tests/unit/test_param_types.py | 23 +- tests/unit/test_pool.py | 15 +- tests/unit/test_session.py | 188 +- tests/unit/test_snapshot.py | 172 +- tests/unit/test_streamed.py | 513 +- tests/unit/test_transaction.py | 109 +- 183 files changed, 35546 insertions(+), 27564 deletions(-) rename google/cloud/spanner_admin_database_v1/gapic/__init__.py => .github/snippet-bot.yml (100%) create mode 100755 .kokoro/populate-secrets.sh create mode 100644 UPGRADING.md delete mode 100644 docs/gapic/v1/admin_database_api.rst delete mode 100644 docs/gapic/v1/admin_database_types.rst delete mode 100644 docs/gapic/v1/admin_instance_api.rst delete mode 100644 docs/gapic/v1/admin_instance_types.rst delete mode 100644 docs/gapic/v1/api.rst delete mode 100644 docs/gapic/v1/transactions.rst delete mode 100644 docs/gapic/v1/types.rst create mode 100644 docs/spanner_admin_database_v1/services.rst create mode 100644 docs/spanner_admin_database_v1/types.rst create mode 100644 docs/spanner_admin_instance_v1/services.rst create mode 100644 docs/spanner_admin_instance_v1/types.rst create mode 100644 docs/spanner_v1/services.rst create mode 100644 docs/spanner_v1/types.rst delete mode 100644 google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py delete mode 100644 google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py delete mode 100644 google/cloud/spanner_admin_database_v1/gapic/enums.py delete mode 100644 google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/__init__.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/backup_pb2.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/common_pb2.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py delete mode 100644 google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py create mode 100644 google/cloud/spanner_admin_database_v1/py.typed create mode 100644 google/cloud/spanner_admin_database_v1/services/__init__.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py 
create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/client.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py create mode 100644 google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py delete mode 100644 google/cloud/spanner_admin_database_v1/types.py create mode 100644 google/cloud/spanner_admin_database_v1/types/__init__.py create mode 100644 google/cloud/spanner_admin_database_v1/types/backup.py create mode 100644 google/cloud/spanner_admin_database_v1/types/common.py create mode 100644 google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/__init__.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/enums.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py delete mode 100644 google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py delete mode 100644 google/cloud/spanner_admin_instance_v1/proto/__init__.py delete mode 100644 google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py delete mode 100644 google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py create mode 100644 google/cloud/spanner_admin_instance_v1/py.typed create mode 100644 google/cloud/spanner_admin_instance_v1/services/__init__.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/__init__.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py create mode 100644 google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py delete mode 100644 google/cloud/spanner_admin_instance_v1/types.py create mode 100644 google/cloud/spanner_admin_instance_v1/types/__init__.py create mode 100644 google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py delete mode 100644 google/cloud/spanner_v1/gapic/__init__.py delete mode 100644 google/cloud/spanner_v1/gapic/enums.py delete mode 100644 google/cloud/spanner_v1/gapic/spanner_client.py delete mode 100644 google/cloud/spanner_v1/gapic/spanner_client_config.py delete mode 100644 google/cloud/spanner_v1/gapic/transports/__init__.py delete mode 100755 google/cloud/spanner_v1/gapic/transports/spanner.grpc.config delete mode 100644 google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py delete 
mode 100644 google/cloud/spanner_v1/proto/__init__.py delete mode 100644 google/cloud/spanner_v1/proto/keys_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/keys_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/mutation_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/mutation_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/query_plan_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/result_set_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/result_set_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/spanner_database_admin.proto delete mode 100644 google/cloud/spanner_v1/proto/spanner_instance_admin.proto delete mode 100644 google/cloud/spanner_v1/proto/spanner_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/spanner_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/transaction_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/transaction_pb2_grpc.py delete mode 100644 google/cloud/spanner_v1/proto/type_pb2.py delete mode 100644 google/cloud/spanner_v1/proto/type_pb2_grpc.py create mode 100644 google/cloud/spanner_v1/py.typed create mode 100644 google/cloud/spanner_v1/services/__init__.py create mode 100644 google/cloud/spanner_v1/services/spanner/__init__.py create mode 100644 google/cloud/spanner_v1/services/spanner/async_client.py create mode 100644 google/cloud/spanner_v1/services/spanner/client.py create mode 100644 google/cloud/spanner_v1/services/spanner/pagers.py create mode 100644 google/cloud/spanner_v1/services/spanner/transports/__init__.py create mode 100644 google/cloud/spanner_v1/services/spanner/transports/base.py create mode 100644 google/cloud/spanner_v1/services/spanner/transports/grpc.py create mode 100644 google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py delete mode 100644 google/cloud/spanner_v1/types.py create mode 100644 google/cloud/spanner_v1/types/__init__.py create mode 100644 google/cloud/spanner_v1/types/keys.py create mode 100644 google/cloud/spanner_v1/types/mutation.py create mode 100644 google/cloud/spanner_v1/types/query_plan.py create mode 100644 google/cloud/spanner_v1/types/result_set.py create mode 100644 google/cloud/spanner_v1/types/spanner.py create mode 100644 google/cloud/spanner_v1/types/transaction.py create mode 100644 google/cloud/spanner_v1/types/type.py create mode 100644 scripts/fixup_spanner_admin_database_v1_keywords.py create mode 100644 scripts/fixup_spanner_admin_instance_v1_keywords.py create mode 100644 scripts/fixup_spanner_v1_keywords.py rename google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py => stale_outputs_checked (100%) create mode 100644 tests/unit/gapic/spanner_admin_database_v1/__init__.py create mode 100644 tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py create mode 100644 tests/unit/gapic/spanner_admin_instance_v1/__init__.py create mode 100644 tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py create mode 100644 tests/unit/gapic/spanner_v1/__init__.py create mode 100644 tests/unit/gapic/spanner_v1/test_spanner.py delete mode 100644 tests/unit/gapic/v1/test_database_admin_client_v1.py delete mode 100644 tests/unit/gapic/v1/test_instance_admin_client_v1.py delete mode 100644 tests/unit/gapic/v1/test_spanner_client_v1.py diff --git a/google/cloud/spanner_admin_database_v1/gapic/__init__.py b/.github/snippet-bot.yml similarity index 100% rename from 
google/cloud/spanner_admin_database_v1/gapic/__init__.py rename to .github/snippet-bot.yml diff --git a/.github/workflows/integration-tests-against-emulator.yaml b/.github/workflows/integration-tests-against-emulator.yaml index d957a96662..803064a38e 100644 --- a/.github/workflows/integration-tests-against-emulator.yaml +++ b/.github/workflows/integration-tests-against-emulator.yaml @@ -21,11 +21,11 @@ jobs: - name: Setup Python uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.8 - name: Install nox run: python -m pip install nox - name: Run system tests - run: nox -s system-3.7 + run: nox -s system env: SPANNER_EMULATOR_HOST: localhost:9010 GOOGLE_CLOUD_PROJECT: emulator-test-project diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index f4c0758ce0..412b0b56a9 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ubuntu:20.10 +from ubuntu:20.04 ENV DEBIAN_FRONTEND noninteractive diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg index ddb827fc6a..e58f8f473e 100644 --- a/.kokoro/docs/common.cfg +++ b/.kokoro/docs/common.cfg @@ -30,7 +30,7 @@ env_vars: { env_vars: { key: "V2_STAGING_BUCKET" - value: "docs-staging-v2-staging" + value: "docs-staging-v2" } # It will upload the docker image after successful builds. diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 0000000000..f52514257e --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? 
== 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index 05c943b0c6..47b6a1fba3 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,42 +23,18 @@ env_vars: { value: "github/python-spanner/.kokoro/release.sh" } -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - -# Fetch magictoken to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "releasetool-magictoken" - } - } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } } -# Fetch api key to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "magic-github-proxy-api-key" - } - } -} +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg index 093647288c..58b15c2849 100644 --- a/.kokoro/samples/python3.6/common.cfg +++ b/.kokoro/samples/python3.6/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.6" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py36" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-spanner/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg index cc54c52285..07195c4c5e 100644 --- a/.kokoro/samples/python3.7/common.cfg +++ b/.kokoro/samples/python3.7/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.7" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py37" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-spanner/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg index 04da5ee7ef..58713430dd 100644 --- a/.kokoro/samples/python3.8/common.cfg +++ b/.kokoro/samples/python3.8/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.8" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py38" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-spanner/.kokoro/test-samples.sh" diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 77a94bb6d7..469771e159 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then git checkout $LATEST_RELEASE fi +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + # Disable buffering, so that the logs stream through. 
export PYTHONUNBUFFERED=1 @@ -101,4 +107,4 @@ cd "$ROOT" # Workaround for Kokoro permissions issue: delete secrets rm testing/{test-env.sh,client-secrets.json,service-account.json} -exit "$RTN" \ No newline at end of file +exit "$RTN" diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index e8c4251f3e..f39236e943 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -15,9 +15,14 @@ set -eo pipefail -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT -chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh -${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true - -exit ${ret_code} +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index b3d1f60298..039f436812 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,44 +1,95 @@ -# Contributor Code of Conduct +# Code of Conduct -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. +## Our Pledge -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. 
+* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. 
They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index e3b0e9d158..11e26783be 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests.
 .. _nox: https://pypi.org/project/nox/
-Note on Editable Installs / Develop Mode
-========================================
-
-- As mentioned previously, using ``setuptools`` in `develop mode`_
-  or a ``pip`` `editable install`_ is not possible with this
-  library. This is because this library uses `namespace packages`_.
-  For context see `Issue #2316`_ and the relevant `PyPA issue`_.
-
-  Since ``editable`` / ``develop`` mode can't be used, packages
-  need to be installed directly. Hence your changes to the source
-  tree don't get incorporated into the **already installed**
-  package.
-
-.. _namespace packages: https://www.python.org/dev/peps/pep-0420/
-.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316
-.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12
-.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
-.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-
 *****************************************
 I'm getting weird errors... Can you help?
 *****************************************
diff --git a/MANIFEST.in b/MANIFEST.in
index 42e5750549..e9e29d1203 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -16,7 +16,6 @@
 # Generated by synthtool. DO NOT EDIT!
 include README.rst LICENSE
-include google/cloud/spanner_v1/gapic/transports/spanner.grpc.config
 recursive-include google *.json *.proto
 recursive-include tests *
 global-exclude *.py[co]
diff --git a/UPGRADING.md b/UPGRADING.md
new file mode 100644
index 0000000000..e90f2141bf
--- /dev/null
+++ b/UPGRADING.md
@@ -0,0 +1,318 @@
+
+
+
+# 2.0.0 Migration Guide
+
+The 2.0 release of the `google-cloud-spanner` client is a significant update based on a
+[next-gen code generator](https://github.com/googleapis/gapic-generator-python).
+It drops support for Python versions below 3.6.
+
+The handwritten client surfaces have minor changes which may require minimal updates to existing user code.
+
+The generated client surfaces have substantial interface changes. Existing user code which uses these surfaces directly
+will require significant updates to use this version.
+
+This document describes the changes that have been made, and what you need to do to update your usage.
+
+If you experience issues or have questions, please file an
+[issue](https://github.com/googleapis/python-spanner/issues).
+
+
+## Supported Python Versions
+
+> **WARNING**: Breaking change
+
+The 2.0.0 release requires Python 3.6+.
+
+## Handwritten Surface Changes
+
+### Resource List Methods
+
+> **WARNING**: Breaking change
+
+The list methods will now return the resource protos rather than the handwritten interfaces.
+
+Accessing properties will remain unchanged. However, calling methods will require creating the handwritten interface
+from the proto.
+
+**Before:**
+```py
+for instance in client.list_instances():
+    if "test" in instance.name:
+        instance.delete()
+```
+```py
+for backup in instance.list_backups():
+    if "test" in backup.name:
+        backup.delete()
+```
+```py
+for database in instance.list_databases():
+    if "test" in database.name:
+        database.delete()
+```
+
+**After:**
+```py
+for instance_pb in client.list_instances():
+    if "test" in instance_pb.name:
+        instance = Instance.from_pb(instance_pb, client)
+        instance.delete()
+```
+```py
+for backup_pb in instance.list_backups():
+    if "test" in backup_pb.name:
+        backup = Backup.from_pb(backup_pb, instance)
+        backup.delete()
+```
+```py
+for database_pb in instance.list_databases():
+    if "test" in database_pb.name:
+        database = Database.from_pb(database_pb, instance)
+        database.delete()
+```
+
+
+### Resource List Pagination
+
+> **WARNING**: Breaking change
+
+The library now handles pages for the user. Previously, the library would return a page generator which required a user
+to then iterate over each page to get the resource. Now, the library handles iterating over the pages and only returns
+the resource protos.
+
+**Before:**
+```py
+for page in client.list_instances(page_size=5):
+    for instance in page:
+        ...
+```
+```py
+for page in instance.list_backups(page_size=5):
+    for backup in page:
+        ...
+```
+```py
+for page in instance.list_databases(page_size=5):
+    for database in page:
+        ...
+```
+
+**After:**
+```py
+for instance_pb in client.list_instances(page_size=5):
+    ...
+```
+```py
+for backup_pb in instance.list_backups(page_size=5):
+    ...
+```
+```py
+for database_pb in instance.list_databases(page_size=5):
+    ...
+```
+
+### Deprecated Method Arguments
+
+> **WARNING**: Breaking change
+
+Deprecated arguments have been removed.
+If you still pass these arguments, they have no effect and can be removed without consequence.
+`user_agent` can be specified using `client_info` instead.
+Users should not be using `page_token` directly as the library handles pagination under the hood.
+
+**Before:**
+```py
+client = Client(user_agent=user_agent)
+```
+```py
+for instance in client.list_instances(page_token=page_token):
+    ...
+```
+```py
+for config in client.list_instance_configs(page_token=page_token):
+    ...
+```
+```py
+for database in instance.list_databases(page_token=page_token):
+    ...
+```
+
+**After:**
+```py
+client = Client()
+```
+```py
+for instance_pb in client.list_instances():
+    ...
+```
+```py
+for instance_config_pb in client.list_instance_configs():
+    ...
+```
+```py
+for database_pb in instance.list_databases():
+    ...
+```
+
+
+## Generated Surface Changes
+
+
+### Method Calls
+
+> **WARNING**: Breaking change
+
+Methods expect request objects. We provide scripts that will convert most common use cases.
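For orientation, here is a minimal sketch of the two new conventions described above. This example is illustrative only: `project_name` is an assumed variable holding a full resource path such as `projects/my-project`, and `my-app/1.0` is an arbitrary application identifier.

```py
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import spanner
from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient

# The removed user_agent argument is carried by ClientInfo instead.
client = spanner.Client(client_info=ClientInfo(user_agent="my-app/1.0"))

# Generated clients take a single request object (or flattened keyword
# arguments -- never both in the same call).
admin_client = InstanceAdminClient()
for instance_pb in admin_client.list_instances(request={"parent": project_name}):
    print(instance_pb.name)
```

The fixup scripts described next automate this rewriting for existing code.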
+
+* Install the library
+
+```sh
+python3 -m pip install google-cloud-spanner
+```
+
+* The scripts `fixup_spanner_v1_keywords.py`, `fixup_spanner_admin_database_v1_keywords.py`, and
+`fixup_spanner_admin_instance_v1_keywords.py` are shipped with the library. They expect an input directory (with the
+code to convert) and an empty destination directory.
+
+```sh
+$ fixup_spanner_v1_keywords.py --input-directory .samples/ --output-directory samples/
+```
+
+>**WARNING**: These scripts will change any calls that match one of the methods. This may cause issues if you also use
+>the handwritten surfaces, e.g. `client.list_instances()`.
+
+#### More details
+
+ In `google-cloud-spanner<2.0.0`, parameters required by the API were positional parameters and optional parameters were
+ keyword parameters.
+
+ **Before:**
+ ```py
+def list_instances(
+    self,
+    parent,
+    page_size=None,
+    filter_=None,
+    retry=google.api_core.gapic_v1.method.DEFAULT,
+    timeout=google.api_core.gapic_v1.method.DEFAULT,
+    metadata=None,
+):
+ ```
+
+ In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a
+ parameter is required or optional.
+
+ Some methods have additional keyword only parameters. The available parameters depend on the
+ [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/master/google/spanner/admin/instance/v1/spanner_instance_admin.proto#L86) specified by the API producer.
+
+
+ **After:**
+ ```py
+def list_instances(
+    self,
+    request: spanner_instance_admin.ListInstancesRequest = None,
+    *,
+    parent: str = None,
+    retry: retries.Retry = gapic_v1.method.DEFAULT,
+    timeout: float = None,
+    metadata: Sequence[Tuple[str, str]] = (),
+) -> pagers.ListInstancesPager:
+ ```
+
+ > **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive.
+ > Passing both will result in an error.
+
+
+ Both of these calls are valid:
+
+ ```py
+ response = client.list_instances(
+     request={
+         "parent": project_name,
+     }
+ )
+ ```
+
+ ```py
+ response = client.list_instances(
+     parent=project_name,
+ )
+ ```
+
+ This call is invalid because it mixes `request` with a keyword argument `parent`. Executing this code
+ will result in an error.
+
+ ```py
+ response = client.list_instances(
+     request={},
+     parent=project_name,
+ )
+ ```
+
+### Enum and protos
+
+> **WARNING**: Breaking change
+
+Generated GAPIC protos have been moved under `types`. Import paths need to be adjusted.
+
+**Before:**
+```py
+from google.cloud.spanner_v1.proto import type_pb2
+
+param_types = {
+    "start_title": type_pb2.Type(code=type_pb2.STRING),
+    "end_title": type_pb2.Type(code=type_pb2.STRING),
+}
+```
+**After:**
+```py
+from google.cloud.spanner_v1 import Type
+from google.cloud.spanner_v1 import TypeCode
+
+param_types = {
+    "start_title": Type(code=TypeCode.STRING),
+    "end_title": Type(code=TypeCode.STRING),
+}
+```
+**Preferred:**
+```py
+from google.cloud import spanner
+
+param_types = {
+    "start_title": spanner.param_types.STRING,
+    "end_title": spanner.param_types.STRING,
+}
+```
+
+Generated GAPIC enum types have also been moved under `types`. Import paths need to be adjusted.
+
+**Before:**
+```py
+from google.cloud.spanner_admin_database_v1.gapic import enums
+
+state = enums.Backup.State.READY
+```
+**After:**
+```py
+from google.cloud.spanner_admin_database_v1 import types
+
+state = types.Backup.State.READY
+```
+**Preferred:**
+```py
+from google.cloud.spanner_admin_database_v1 import Backup
+
+state = Backup.State.READY
+```
diff --git a/docs/advanced-session-pool-topics.rst b/docs/advanced-session-pool-topics.rst
index 1b21fdcc9b..ea64c98a10 100644
--- a/docs/advanced-session-pool-topics.rst
+++ b/docs/advanced-session-pool-topics.rst
@@ -6,7 +6,7 @@ Custom Session Pool Implementations You can supply your own pool implementation, which must satisfy the contract laid out in
-:class:`~google.cloud.spanner.pool.AbstractSessionPool`:
+:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`:
 .. code-block:: python
@@ -35,11 +35,11 @@ Lowering latency for read / query operations Some applications may need to minimize latency for read operations, including particularly the overhead of making an API request to create or refresh a
-session. :class:`~google.cloud.spanner.pool.PingingPool` is designed for such
+session. :class:`~google.cloud.spanner_v1.pool.PingingPool` is designed for such
 applications, which need to configure a background thread to do the work of keeping the sessions fresh.
-Create an instance of :class:`~google.cloud.spanner.pool.PingingPool`:
+Create an instance of :class:`~google.cloud.spanner_v1.pool.PingingPool`:
 .. code-block:: python
@@ -74,12 +74,12 @@ Lowering latency for mixed read-write operations Some applications may need to minimize latency for read write operations, including particularly the overhead of making an API request to create or refresh a session or to begin a session's transaction.
-:class:`~google.cloud.spanner.pool.TransactionPingingPool` is designed for
+:class:`~google.cloud.spanner_v1.pool.TransactionPingingPool` is designed for
 such applications, which need to configure a background thread to do the work of keeping the sessions fresh and starting their transactions after use.
 Create an instance of
-:class:`~google.cloud.spanner.pool.TransactionPingingPool`:
+:class:`~google.cloud.spanner_v1.pool.TransactionPingingPool`:
 .. code-block:: python
diff --git a/docs/api-reference.rst b/docs/api-reference.rst
index c767b23afa..30f67cd300 100644
--- a/docs/api-reference.rst
+++ b/docs/api-reference.rst
@@ -25,9 +25,9 @@ and some advanced use cases may wish to interact with these directly: .. toctree:: :maxdepth: 1
-  gapic/v1/api
-  gapic/v1/types
-  gapic/v1/admin_database_api
-  gapic/v1/admin_database_types
-  gapic/v1/admin_instance_api
-  gapic/v1/admin_instance_types
+  spanner_v1/services
+  spanner_v1/types
+  spanner_admin_database_v1/services
+  spanner_admin_database_v1/types
+  spanner_admin_instance_v1/services
+  spanner_admin_instance_v1/types
diff --git a/docs/conf.py b/docs/conf.py
index 9eee0015d1..7d53976561 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -29,7 +29,7 @@ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.6.3"
+needs_sphinx = "1.5.5"
 # Add any Sphinx extension module names here, as strings.
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -39,6 +39,7 @@ "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", + "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", @@ -346,7 +347,7 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), } diff --git a/docs/gapic/v1/admin_database_api.rst b/docs/gapic/v1/admin_database_api.rst deleted file mode 100644 index c63f242e85..0000000000 --- a/docs/gapic/v1/admin_database_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Database Client API -================================= - -.. automodule:: google.cloud.spanner_admin_database_v1 - :members: - :inherited-members: diff --git a/docs/gapic/v1/admin_database_types.rst b/docs/gapic/v1/admin_database_types.rst deleted file mode 100644 index fa9aaa73b1..0000000000 --- a/docs/gapic/v1/admin_database_types.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Database Client Types -=================================== - -.. automodule:: google.cloud.spanner_admin_database_v1.types - :members: - :noindex: diff --git a/docs/gapic/v1/admin_instance_api.rst b/docs/gapic/v1/admin_instance_api.rst deleted file mode 100644 index c8c320a6cf..0000000000 --- a/docs/gapic/v1/admin_instance_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Instance Client API -================================= - -.. automodule:: google.cloud.spanner_admin_instance_v1 - :members: - :inherited-members: diff --git a/docs/gapic/v1/admin_instance_types.rst b/docs/gapic/v1/admin_instance_types.rst deleted file mode 100644 index f8f3afa5ff..0000000000 --- a/docs/gapic/v1/admin_instance_types.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Instance Client Types -=================================== - -.. automodule:: google.cloud.spanner_admin_instance_v1.types - :members: - :noindex: diff --git a/docs/gapic/v1/api.rst b/docs/gapic/v1/api.rst deleted file mode 100644 index 79e4835f22..0000000000 --- a/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Client API -================== - -.. automodule:: google.cloud.spanner_v1 - :members: - :inherited-members: diff --git a/docs/gapic/v1/transactions.rst b/docs/gapic/v1/transactions.rst deleted file mode 100644 index d34af43b4a..0000000000 --- a/docs/gapic/v1/transactions.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. - This page is pulled from the TransactionOption type, where this entire - kaboodle is auto-generated. Sphinx does not particularly appreciate - entire narrative documentation, complete with headers, in an arbitrary - class docstring, and complains about this, so I (lukesneeringer@) - manually copied it over here. - - This should probably be updated when the Spanner code is re-generated. - This will be easy to remember because the source that needs to be copied - will be dropped in transaction_pb2.py and Sphinx will complain loudly - about it. - - Internal Google ticket: b/65243734 - -:orphan: - -.. _spanner-txn: - -Transactions -============ - -Each session can have at most one active transaction at a time. After -the active transaction is completed, the session can immediately be -re-used for the next transaction. 
It is not necessary to create a new -session for each transaction. - -Transaction Modes -================= - -Cloud Spanner supports two transaction modes: - -1. Locking read-write. This type of transaction is the only way to write - data into Cloud Spanner. These transactions rely on pessimistic - locking and, if necessary, two-phase commit. Locking read-write - transactions may abort, requiring the application to retry. - -2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. Snapshot - read-only transactions can be configured to read at timestamps in the - past. Snapshot read-only transactions do not need to be committed. - -For transactions that only read, snapshot read-only transactions provide -simpler semantics and are almost always faster. In particular, read-only -transactions do not take locks, so they do not conflict with read-write -transactions. As a consequence of not taking locks, they also do not -abort, so retry loops are not needed. - -Transactions may only read/write data in a single database. They may, -however, read/write data in different tables within that database. - -Locking Read-Write Transactions -------------------------------- - -Locking transactions may be used to atomically read-modify-write data -anywhere in a database. This type of transaction is externally -consistent. - -Clients should attempt to minimize the amount of time a transaction is -active. Faster transactions commit with higher probability and cause -less contention. Cloud Spanner attempts to keep read locks active as -long as the transaction continues to do reads, and the transaction has -not been terminated by [Commit][google.spanner.v1.Spanner.Commit] or -[Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -inactivity at the client may cause Cloud Spanner to release a -transaction's locks and abort it. - -Reads performed within a transaction acquire locks on the data being -read. Writes can only be done at commit time, after all reads have been -completed. Conceptually, a read-write transaction consists of zero or -more reads or SQL queries followed by -[Commit][google.spanner.v1.Spanner.Commit]. At any time before -[Commit][google.spanner.v1.Spanner.Commit], the client can send a -[Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -transaction. - -Semantics -~~~~~~~~~ - -Cloud Spanner can commit the transaction if all read locks it acquired -are still valid at commit time, and it is able to acquire write locks -for all writes. Cloud Spanner can abort the transaction for any reason. -If a commit attempt returns ``ABORTED``, Cloud Spanner guarantees that -the transaction has not modified any user data in Cloud Spanner. - -Unless the transaction commits, Cloud Spanner makes no guarantees about -how long the transaction's locks were held for. It is an error to use -Cloud Spanner locks for any sort of mutual exclusion other than between -Cloud Spanner transactions themselves. - -Retrying Aborted Transactions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When a transaction aborts, the application can choose to retry the whole -transaction again. To maximize the chances of successfully committing -the retry, the client should execute the retry in the same session as -the original attempt. The original session's lock priority increases -with each consecutive abort, meaning that each attempt has a slightly -better chance of success than the previous. 
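In the handwritten Python client, this retry loop is what `Database.run_in_transaction` automates: on `ABORTED` it re-runs the supplied function in the same session, so each attempt benefits from the increased lock priority described above. A minimal sketch, assuming a hypothetical `my-instance`/`my-database` pair with a `Singers` table:

```py
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

def do_work(transaction):
    # All reads and writes here execute inside a single read-write transaction.
    transaction.execute_update(
        "UPDATE Singers SET FirstName = 'Marc' WHERE SingerId = 1"
    )

# Re-runs the whole unit of work whenever Cloud Spanner aborts the transaction.
database.run_in_transaction(do_work)
```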
- -Under some circumstances (e.g., many transactions attempting to modify -the same row(s)), a transaction can abort many times in a short period -before successfully committing. Thus, it is not a good idea to cap the -number of retries a transaction can attempt; instead, it is better to -limit the total amount of wall time spent retrying. - -Idle Transactions -~~~~~~~~~~~~~~~~~ - -A transaction is considered idle if it has no outstanding reads or SQL -queries and has not started a read or SQL query within the last 10 -seconds. Idle transactions can be aborted by Cloud Spanner so that they -don't hold on to locks indefinitely. In that case, the commit will fail -with error ``ABORTED``. - -If this behavior is undesirable, periodically executing a simple SQL -query in the transaction (e.g., ``SELECT 1``) prevents the transaction -from becoming idle. - -Snapshot Read-Only Transactions -------------------------------- - -Snapshot read-only transactions provides a simpler method than locking -read-write transactions for doing several consistent reads. However, -this type of transaction does not support writes. - -Snapshot transactions do not take locks. Instead, they work by choosing -a Cloud Spanner timestamp, then executing all reads at that timestamp. -Since they do not acquire locks, they do not block concurrent read-write -transactions. - -Unlike locking read-write transactions, snapshot read-only transactions -never abort. They can fail if the chosen read timestamp is garbage -collected; however, the default garbage collection policy is generous -enough that most applications do not need to worry about this in -practice. - -Snapshot read-only transactions do not need to call -[Commit][google.spanner.v1.Spanner.Commit] or -[Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -permitted to do so). - -To execute a snapshot transaction, the client specifies a timestamp -bound, which tells Cloud Spanner how to choose a read timestamp. - -The types of timestamp bound are: - -- Strong (the default). -- Bounded staleness. -- Exact staleness. - -If the Cloud Spanner database to be read is geographically distributed, -stale read-only transactions can execute more quickly than strong or -read-write transaction, because they are able to execute far from the -leader replica. - -Each type of timestamp bound is discussed in detail below. - -Strong -~~~~~~ - -Strong reads are guaranteed to see the effects of all transactions that -have committed before the start of the read. Furthermore, all rows -yielded by a single read are consistent with each other -- if any part -of the read observes a transaction, all parts of the read see the -transaction. - -Strong reads are not repeatable: two consecutive strong read-only -transactions might return inconsistent results if there are concurrent -writes. If consistency across reads is required, the reads should be -executed within a transaction or at an exact read timestamp. - -See -[TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - -Exact Staleness -~~~~~~~~~~~~~~~ - -These timestamp bounds execute reads at a user-specified timestamp. -Reads at a timestamp are guaranteed to see a consistent prefix of the -global transaction history: they observe modifications done by all -transactions with a commit timestamp <= the read timestamp, and observe -none of the modifications done by transactions with a larger commit -timestamp. 
-timestamp. They will block until all conflicting transactions that may
-be assigned commit timestamps <= the read timestamp have finished.
-
-The timestamp can either be expressed as an absolute Cloud Spanner
-commit timestamp or a staleness relative to the current time.
-
-These modes do not require a "negotiation phase" to pick a timestamp. As
-a result, they execute slightly faster than the equivalent boundedly
-stale concurrency modes. On the other hand, boundedly stale reads
-usually return fresher results.
-
-See
-[TransactionOptions.ReadOnly.read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read\_timestamp]
-and
-[TransactionOptions.ReadOnly.exact\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact\_staleness].
-
-Bounded Staleness
-~~~~~~~~~~~~~~~~~
-
-Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-subject to a user-provided staleness bound. Cloud Spanner chooses the
-newest timestamp within the staleness bound that allows execution of the
-reads at the closest available replica without blocking.
-
-All rows yielded are consistent with each other -- if any part of the
-read observes a transaction, all parts of the read see the transaction.
-Boundedly stale reads are not repeatable: two stale reads, even if they
-use the same staleness bound, can execute at different timestamps and
-thus return inconsistent results.
-
-Boundedly stale reads execute in two phases: the first phase negotiates
-a timestamp among all replicas needed to serve the read. In the second
-phase, reads are executed at the negotiated timestamp.
-
-As a result of the two-phase execution, bounded staleness reads are
-usually a little slower than comparable exact staleness reads. However,
-they are typically able to return fresher results, and are more likely
-to execute at the closest replica.
-
-Because the timestamp negotiation requires up-front knowledge of which
-rows will be read, it can only be used with single-use read-only
-transactions.
-
-See
-[TransactionOptions.ReadOnly.max\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max\_staleness]
-and
-[TransactionOptions.ReadOnly.min\_read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min\_read\_timestamp].
-
-Old Read Timestamps and Garbage Collection
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Cloud Spanner continuously garbage collects deleted and overwritten data
-in the background to reclaim storage space. This process is known as
-"version GC". By default, version GC reclaims versions after they are
-one hour old. Because of this, Cloud Spanner cannot perform reads at
-read timestamps more than one hour in the past. This restriction also
-applies to in-progress reads and/or SQL queries whose timestamps become
-too old while executing. Reads and SQL queries with too-old read
-timestamps fail with the error ``FAILED_PRECONDITION``.
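In the Python client, these bounds surface as keyword arguments to ``Database.snapshot``; a short sketch (instance, database, and table names are placeholders, and stalenesses are ``datetime.timedelta`` values):

.. code:: python

    import datetime

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Strong read (the default timestamp bound).
    with database.snapshot() as snapshot:
        rows = list(snapshot.execute_sql("SELECT COUNT(*) FROM accounts"))

    # Exact staleness: read at a timestamp 15 seconds in the past.
    with database.snapshot(
        exact_staleness=datetime.timedelta(seconds=15)
    ) as snapshot:
        rows = list(snapshot.execute_sql("SELECT COUNT(*) FROM accounts"))

    # Bounded staleness: Cloud Spanner picks the freshest timestamp within
    # the bound (single-use read-only transactions only).
    with database.snapshot(
        max_staleness=datetime.timedelta(seconds=15)
    ) as snapshot:
        rows = list(snapshot.execute_sql("SELECT COUNT(*) FROM accounts"))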
diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst
deleted file mode 100644
index 54424febf3..0000000000
--- a/docs/gapic/v1/types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Spanner Client Types
-===================================
-
-.. automodule:: google.cloud.spanner_v1.types
-   :members:
-   :noindex:
diff --git a/docs/instance-usage.rst b/docs/instance-usage.rst
index 909e36b93f..55042c2df3 100644
--- a/docs/instance-usage.rst
+++ b/docs/instance-usage.rst
@@ -1,7 +1,7 @@
 Instance Admin
 ==============
 
-After creating a :class:`~google.cloud.spanner.client.Client`, you can
+After creating a :class:`~google.cloud.spanner_v1.client.Client`, you can
 interact with individual instances for a project.
 
 Instance Configurations
@@ -12,7 +12,7 @@ specifying the location and other parameters for a set of instances.  These
 configurations are defined by the server, and cannot be changed.
 
 To iterate over all instance configurations available to your project, use the
-:meth:`~google.cloud.spanner.client.Client.list_instance_configs`
+:meth:`~google.cloud.spanner_v1.client.Client.list_instance_configs`
 method of the client:
 
 .. code:: python
@@ -22,7 +22,7 @@ method of the client:
 
 To fetch a single instance configuration, use the
-:meth:`~google.cloud.spanner.client.Client.get_instance_configuration`
+:meth:`~google.cloud.spanner_v1.client.Client.get_instance_configuration`
 method of the client:
 
 .. code:: python
@@ -37,7 +37,7 @@ List Instances
 --------------
 
 If you want a comprehensive list of all existing instances, iterate over the
-:meth:`~google.cloud.spanner.client.Client.list_instances` method of
+:meth:`~google.cloud.spanner_v1.client.Client.list_instances` method of
 the client:
 
 .. code:: python
@@ -52,7 +52,7 @@ objects.
 
 Instance Factory
 ----------------
 
-To create a :class:`~google.cloud.spanner.instance.Instance` object:
+To create a :class:`~google.cloud.spanner_v1.instance.Instance` object:
 
 .. code:: python
@@ -65,7 +65,7 @@ To create a :class:`~google.cloud.spanner.instance.Instance` object:
 
 - ``configuration_name`` is the name of the instance configuration to which
   the instance will be bound.  It must be one of the names configured for your
   project, discoverable via
-  :meth:`~google.cloud.spanner.client.Client.list_instance_configs`.
+  :meth:`~google.cloud.spanner_v1.client.Client.list_instance_configs`.
 
 - ``node_count`` is a positive integral count of the number of nodes used
   by the instance.  More nodes allow for higher performance, but at a higher
@@ -87,7 +87,7 @@ Create a new Instance
 ---------------------
 
 After creating the instance object, use its
-:meth:`~google.cloud.spanner.instance.Instance.create` method to
+:meth:`~google.cloud.spanner_v1.instance.Instance.create` method to
 trigger its creation on the server:
 
 .. code:: python
@@ -98,7 +98,7 @@ trigger its creation on the server:
 
 .. note::
 
     Creating an instance triggers a "long-running operation" and
-    returns an :class:`google.cloud.spanner.instance.Operation`
+    returns an :class:`google.cloud.spanner_v1.instance.Operation`
     object.  See :ref:`check-on-current-instance-operation`
     for polling to find out if the operation is completed.
@@ -107,7 +107,7 @@ Refresh metadata for an existing Instance
 -----------------------------------------
 
 After creating the instance object, reload its server-side configuration
-using its :meth:`~google.cloud.spanner.instance.Instance.reload` method:
+using its :meth:`~google.cloud.spanner_v1.instance.Instance.reload` method:
 
 .. code:: python
@@ -121,7 +121,7 @@ Update an existing Instance
 ---------------------------
 
 After creating the instance object, you can update its metadata via
-its :meth:`~google.cloud.spanner.instance.Instance.update` method:
+its :meth:`~google.cloud.spanner_v1.instance.Instance.update` method:
 
 .. code:: python
@@ -131,7 +131,7 @@ its :meth:`~google.cloud.spanner.instance.Instance.update` method:
 
 .. note::
 
     Updating an instance triggers a "long-running operation" and
-    returns a :class:`google.cloud.spanner.instance.Operation`
+    returns a :class:`google.cloud.spanner_v1.instance.Operation`
     object.  See :ref:`check-on-current-instance-operation`
     for polling to find out if the operation is completed.
@@ -140,7 +140,7 @@ Delete an existing Instance
 ---------------------------
 
 Delete an instance using its
-:meth:`~google.cloud.spanner.instance.Instance.delete` method:
+:meth:`~google.cloud.spanner_v1.instance.Instance.delete` method:
 
 .. code:: python
@@ -152,10 +152,10 @@ Delete an instance using its
 Resolve Current Instance Operation
 ----------------------------------
 
-The :meth:`~google.cloud.spanner.instance.Instance.create` and
-:meth:`~google.cloud.spanner.instance.Instance.update` methods of instance
+The :meth:`~google.cloud.spanner_v1.instance.Instance.create` and
+:meth:`~google.cloud.spanner_v1.instance.Instance.update` methods of the instance
 object trigger long-running operations on the server, and return instances
-of the :class:`~google.cloud.spanner.instance.Operation` class.
+of the :class:`~google.cloud.spanner_v1.instance.Operation` class.
 
 If you want to block on the completion of those operations, use the
 ``result`` method on the returned objects:
@@ -172,8 +172,8 @@ Next Step
 ---------
 
 Now we go down the hierarchy from
-:class:`~google.cloud.spanner.instance.Instance` to a
-:class:`~google.cloud.spanner.database.Database`.
+:class:`~google.cloud.spanner_v1.instance.Instance` to a
+:class:`~google.cloud.spanner_v1.database.Database`.
 
 Next, learn about the :doc:`database-usage`.
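For reference, the create-and-poll pattern described above looks roughly like this (project, instance, and configuration names are placeholders):

.. code:: python

    from google.cloud import spanner

    client = spanner.Client()
    instance = client.instance(
        "my-instance",
        configuration_name="projects/my-project/instanceConfigs/regional-us-central1",
        node_count=1,
    )

    operation = instance.create()  # returns a long-running Operation
    operation.result(timeout=300)  # block until the create completes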
diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services.rst
new file mode 100644
index 0000000000..770ff1a8c2
--- /dev/null
+++ b/docs/spanner_admin_database_v1/services.rst
@@ -0,0 +1,6 @@
+Services for Google Cloud Spanner Admin Database v1 API
+=======================================================
+
+.. automodule:: google.cloud.spanner_admin_database_v1.services.database_admin
+   :members:
+   :inherited-members:
diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types.rst
new file mode 100644
index 0000000000..da44c33458
--- /dev/null
+++ b/docs/spanner_admin_database_v1/types.rst
@@ -0,0 +1,5 @@
+Types for Google Cloud Spanner Admin Database v1 API
+====================================================
+
+.. automodule:: google.cloud.spanner_admin_database_v1.types
+   :members:
diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services.rst
new file mode 100644
index 0000000000..44b02ecebb
--- /dev/null
+++ b/docs/spanner_admin_instance_v1/services.rst
@@ -0,0 +1,6 @@
+Services for Google Cloud Spanner Admin Instance v1 API
+=======================================================
+
+.. automodule:: google.cloud.spanner_admin_instance_v1.services.instance_admin
+   :members:
+   :inherited-members:
diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types.rst
new file mode 100644
index 0000000000..b496dfc681
--- /dev/null
+++ b/docs/spanner_admin_instance_v1/types.rst
@@ -0,0 +1,5 @@
+Types for Google Cloud Spanner Admin Instance v1 API
+====================================================
+
+.. automodule:: google.cloud.spanner_admin_instance_v1.types
+   :members:
diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services.rst
new file mode 100644
index 0000000000..9dbd2fe03e
--- /dev/null
+++ b/docs/spanner_v1/services.rst
@@ -0,0 +1,6 @@
+Services for Google Cloud Spanner v1 API
+========================================
+
+.. automodule:: google.cloud.spanner_v1.services.spanner
+   :members:
+   :inherited-members:
diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types.rst
new file mode 100644
index 0000000000..15b938d7f3
--- /dev/null
+++ b/docs/spanner_v1/types.rst
@@ -0,0 +1,5 @@
+Types for Google Cloud Spanner v1 API
+=====================================
+
+.. automodule:: google.cloud.spanner_v1.types
+   :members:
diff --git a/docs/transaction-usage.rst b/docs/transaction-usage.rst
index e475894939..4781cfa148 100644
--- a/docs/transaction-usage.rst
+++ b/docs/transaction-usage.rst
@@ -1,11 +1,11 @@
 Read-write Transactions
 #######################
 
-A :class:`~google.cloud.spanner.transaction.Transaction` represents a
+A :class:`~google.cloud.spanner_v1.transaction.Transaction` represents a
 transaction: when the transaction commits, it will send any accumulated
 mutations to the server.
 
-To understand more about how transactions work, visit :ref:`spanner-txn`.
+To understand more about how transactions work, visit `Transaction <https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction>`_.
 
 To learn more about how to use them in the Python client, continue reading.
diff --git a/google/cloud/spanner.py b/google/cloud/spanner.py
index 0b1d3d949f..41a77cf7ce 100644
--- a/google/cloud/spanner.py
+++ b/google/cloud/spanner.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Google LLC All rights reserved.
+# Copyright 2016, Google LLC All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,37 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Cloud Spanner API package.""" - from __future__ import absolute_import from google.cloud.spanner_v1 import __version__ -from google.cloud.spanner_v1 import AbstractSessionPool -from google.cloud.spanner_v1 import BurstyPool +from google.cloud.spanner_v1 import param_types from google.cloud.spanner_v1 import Client -from google.cloud.spanner_v1 import COMMIT_TIMESTAMP -from google.cloud.spanner_v1 import enums -from google.cloud.spanner_v1 import FixedSizePool from google.cloud.spanner_v1 import KeyRange from google.cloud.spanner_v1 import KeySet -from google.cloud.spanner_v1 import param_types +from google.cloud.spanner_v1 import AbstractSessionPool +from google.cloud.spanner_v1 import BurstyPool +from google.cloud.spanner_v1 import FixedSizePool from google.cloud.spanner_v1 import PingingPool from google.cloud.spanner_v1 import TransactionPingingPool -from google.cloud.spanner_v1 import types +from google.cloud.spanner_v1 import COMMIT_TIMESTAMP __all__ = ( + # google.cloud.spanner "__version__", - "AbstractSessionPool", - "BurstyPool", + "param_types", + # google.cloud.spanner_v1.client "Client", - "COMMIT_TIMESTAMP", - "enums", - "FixedSizePool", + # google.cloud.spanner_v1.keyset "KeyRange", "KeySet", - "param_types", + # google.cloud.spanner_v1.pool + "AbstractSessionPool", + "BurstyPool", + "FixedSizePool", "PingingPool", "TransactionPingingPool", - "types", + # local + "COMMIT_TIMESTAMP", ) diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py index 3a5b42403c..0f5bcd49b1 100644 --- a/google/cloud/spanner_admin_database_v1/__init__.py +++ b/google/cloud/spanner_admin_database_v1/__init__.py @@ -1,29 +1,83 @@ # -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# -from __future__ import absolute_import - -from google.cloud.spanner_admin_database_v1 import types -from google.cloud.spanner_admin_database_v1.gapic import database_admin_client -from google.cloud.spanner_admin_database_v1.gapic import enums - - -class DatabaseAdminClient(database_admin_client.DatabaseAdminClient): - __doc__ = database_admin_client.DatabaseAdminClient.__doc__ - enums = enums +from .services.database_admin import DatabaseAdminClient +from .types.backup import Backup +from .types.backup import BackupInfo +from .types.backup import CreateBackupMetadata +from .types.backup import CreateBackupRequest +from .types.backup import DeleteBackupRequest +from .types.backup import GetBackupRequest +from .types.backup import ListBackupOperationsRequest +from .types.backup import ListBackupOperationsResponse +from .types.backup import ListBackupsRequest +from .types.backup import ListBackupsResponse +from .types.backup import UpdateBackupRequest +from .types.common import OperationProgress +from .types.spanner_database_admin import CreateDatabaseMetadata +from .types.spanner_database_admin import CreateDatabaseRequest +from .types.spanner_database_admin import Database +from .types.spanner_database_admin import DropDatabaseRequest +from .types.spanner_database_admin import GetDatabaseDdlRequest +from .types.spanner_database_admin import GetDatabaseDdlResponse +from .types.spanner_database_admin import GetDatabaseRequest +from .types.spanner_database_admin import ListDatabaseOperationsRequest +from .types.spanner_database_admin import ListDatabaseOperationsResponse +from .types.spanner_database_admin import ListDatabasesRequest +from .types.spanner_database_admin import ListDatabasesResponse +from .types.spanner_database_admin import OptimizeRestoredDatabaseMetadata +from .types.spanner_database_admin import RestoreDatabaseMetadata +from .types.spanner_database_admin import RestoreDatabaseRequest +from .types.spanner_database_admin import RestoreInfo +from .types.spanner_database_admin import RestoreSourceType +from .types.spanner_database_admin import UpdateDatabaseDdlMetadata +from .types.spanner_database_admin import UpdateDatabaseDdlRequest -__all__ = ("enums", "types", "DatabaseAdminClient") +__all__ = ( + "Backup", + "BackupInfo", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateDatabaseMetadata", + "CreateDatabaseRequest", + "Database", + "DeleteBackupRequest", + "DropDatabaseRequest", + "GetBackupRequest", + "GetDatabaseDdlRequest", + "GetDatabaseDdlResponse", + "GetDatabaseRequest", + "ListBackupOperationsRequest", + "ListBackupOperationsResponse", + "ListBackupsRequest", + "ListBackupsResponse", + "ListDatabaseOperationsRequest", + "ListDatabaseOperationsResponse", + "ListDatabasesRequest", + "ListDatabasesResponse", + "OperationProgress", + "OptimizeRestoredDatabaseMetadata", + "RestoreDatabaseMetadata", + "RestoreDatabaseRequest", + "RestoreInfo", + "RestoreSourceType", + "UpdateBackupRequest", + "UpdateDatabaseDdlMetadata", + "UpdateDatabaseDdlRequest", + "DatabaseAdminClient", +) diff --git a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py deleted file mode 100644 index dc11cb0283..0000000000 --- a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py +++ /dev/null @@ -1,1923 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.spanner.admin.database.v1 DatabaseAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.spanner_admin_database_v1.gapic import database_admin_client_config -from google.cloud.spanner_admin_database_v1.gapic import enums -from google.cloud.spanner_admin_database_v1.gapic.transports import ( - database_admin_grpc_transport, -) -from google.cloud.spanner_admin_database_v1.proto import backup_pb2 -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2_grpc -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version - - -class DatabaseAdminClient(object): - """ - Cloud Spanner Database Admin API - - The Cloud Spanner Database Admin API can be used to create, drop, and - list databases. It also enables updating the schema of pre-existing - databases. It can be also used to create, delete and list backups for a - database and to restore from an existing backup. - """ - - SERVICE_ADDRESS = "spanner.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.spanner.admin.database.v1.DatabaseAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatabaseAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def backup_path(cls, project, instance, backup): - """Return a fully-qualified backup string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/backups/{backup}", - project=project, - instance=instance, - backup=backup, - ) - - @classmethod - def database_path(cls, project, instance, database): - """Return a fully-qualified database string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}", - project=project, - instance=instance, - database=database, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.DatabaseAdminGrpcTransport, - Callable[[~.Credentials, type], ~.DatabaseAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = database_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=database_admin_grpc_transport.DatabaseAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = database_admin_grpc_transport.DatabaseAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_database( - self, - parent, - create_statement, - extra_statements=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new Cloud Spanner database and starts to prepare it for - serving. The returned ``long-running operation`` will have a name of the - format ``/operations/`` and can be used to - track preparation of the database. The ``metadata`` field type is - ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, - if successful. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `create_statement`: - >>> create_statement = '' - >>> - >>> response = client.create_database(parent, create_statement) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance that will serve the new database. - Values are of the form ``projects//instances/``. - create_statement (str): Required. A ``CREATE DATABASE`` statement, which specifies the ID of - the new database. 
The database ID must conform to the regular expression - ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 characters in - length. If the database ID is a reserved word or if it contains a - hyphen, the database ID must be enclosed in backticks (:literal:`\``). - extra_statements (list[str]): Optional. A list of DDL statements to run inside the newly created - database. Statements can create tables, indexes, etc. These - statements execute atomically with the creation of the database: - if there is an error in any statement, the database is not created. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.operation.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_database" not in self._inner_api_calls: - self._inner_api_calls[ - "create_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_database, - default_retry=self._method_configs["CreateDatabase"].retry, - default_timeout=self._method_configs["CreateDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.CreateDatabaseRequest( - parent=parent, - create_statement=create_statement, - extra_statements=extra_statements, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_database_admin_pb2.Database, - metadata_type=spanner_database_admin_pb2.CreateDatabaseMetadata, - ) - - def update_database_ddl( - self, - database, - statements, - operation_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - execution of the schema change(s). The ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # TODO: Initialize `statements`: - >>> statements = [] - >>> - >>> response = client.update_database_ddl(database, statements) - >>> - >>> def callback(operation_future): - ... 
# Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - database (str): Required. The database to update. - statements (list[str]): Required. DDL statements to be applied to the database. - operation_id (str): If empty, the new update request is assigned an - automatically-generated operation ID. Otherwise, ``operation_id`` is - used to construct the name of the resulting ``Operation``. - - Specifying an explicit operation ID simplifies determining whether the - statements were executed in the event that the ``UpdateDatabaseDdl`` - call is replayed, or the return value is otherwise lost: the - ``database`` and ``operation_id`` fields can be combined to form the - ``name`` of the resulting ``longrunning.Operation``: - ``/operations/``. - - ``operation_id`` should be unique within the database, and must be a - valid identifier: ``[a-z][a-z0-9_]*``. Note that automatically-generated - operation IDs always begin with an underscore. If the named operation - already exists, ``UpdateDatabaseDdl`` returns ``ALREADY_EXISTS``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.operation.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_database_ddl" not in self._inner_api_calls: - self._inner_api_calls[ - "update_database_ddl" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_database_ddl, - default_retry=self._method_configs["UpdateDatabaseDdl"].retry, - default_timeout=self._method_configs["UpdateDatabaseDdl"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.UpdateDatabaseDdlRequest( - database=database, statements=statements, operation_id=operation_id - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_database_ddl"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=spanner_database_admin_pb2.UpdateDatabaseDdlMetadata, - ) - - def create_backup( - self, - parent, - backup_id, - backup, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts creating a new Cloud Spanner Backup. 
The returned backup - ``long-running operation`` will have a name of the format - ``projects//instances//backups//operations/`` - and can be used to track creation of the backup. The ``metadata`` field - type is ``CreateBackupMetadata``. The ``response`` field type is - ``Backup``, if successful. Cancelling the returned operation will stop - the creation and delete the backup. There can be only one pending backup - creation per database. Backup creation of different databases can run - concurrently. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `backup_id`: - >>> backup_id = '' - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> response = client.create_backup(parent, backup_id, backup) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance in which the backup will be - created. This must be the same instance that contains the database the - backup will be created from. The backup will be stored in the - location(s) specified in the instance configuration of this instance. - Values are of the form ``projects//instances/``. - backup_id (str): Required. The id of the backup to be created. The ``backup_id`` - appended to ``parent`` forms the full backup name of the form - ``projects//instances//backups/``. - backup (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Backup]): Required. The backup to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.Backup` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.operation.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "create_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_backup, - default_retry=self._method_configs["CreateBackup"].retry, - default_timeout=self._method_configs["CreateBackup"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - backup_pb2.Backup, - metadata_type=backup_pb2.CreateBackupMetadata, - ) - - def restore_database( - self, - parent, - database_id, - backup=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a new database by restoring from a completed backup. The new - database must be in the same project and in an instance with the same - instance configuration as the instance containing the backup. The - returned database ``long-running operation`` has a name of the format - ``projects//instances//databases//operations/``, - and can be used to track the progress of the operation, and to cancel - it. The ``metadata`` field type is ``RestoreDatabaseMetadata``. The - ``response`` type is ``Database``, if successful. Cancelling the - returned operation will stop the restore and delete the database. There - can be only one database being restored into an instance at a time. Once - the restore operation completes, a new restore operation can be - initiated, without waiting for the optimize operation associated with - the first restore to complete. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `database_id`: - >>> database_id = '' - >>> - >>> response = client.restore_database(parent, database_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance in which to create the restored - database. This instance must be in the same project and have the same - instance configuration as the instance containing the source backup. - Values are of the form ``projects//instances/``. - database_id (str): Required. The id of the database to create and restore to. This - database must not already exist. The ``database_id`` appended to - ``parent`` forms the full database name of the form - ``projects//instances//databases/``. - backup (str): Name of the backup from which to restore. Values are of the form - ``projects//instances//backups/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.operation.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "restore_database" not in self._inner_api_calls: - self._inner_api_calls[ - "restore_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.restore_database, - default_retry=self._method_configs["RestoreDatabase"].retry, - default_timeout=self._method_configs["RestoreDatabase"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof(backup=backup) - - request = spanner_database_admin_pb2.RestoreDatabaseRequest( - parent=parent, database_id=database_id, backup=backup - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["restore_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_database_admin_pb2.Database, - metadata_type=spanner_database_admin_pb2.RestoreDatabaseMetadata, - ) - - def list_databases( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists Cloud Spanner databases. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_databases(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_databases(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The instance whose databases should be listed. Values are - of the form ``projects//instances/``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Database` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_databases" not in self._inner_api_calls: - self._inner_api_calls[ - "list_databases" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_databases, - default_retry=self._method_configs["ListDatabases"].retry, - default_timeout=self._method_configs["ListDatabases"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.ListDatabasesRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_databases"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="databases", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_database( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the state of a Cloud Spanner database. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> name = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.get_database(name) - - Args: - name (str): Required. The name of the requested database. Values are of the form - ``projects//instances//databases/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Database` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_database" not in self._inner_api_calls: - self._inner_api_calls[ - "get_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_database, - default_retry=self._method_configs["GetDatabase"].retry, - default_timeout=self._method_configs["GetDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.GetDatabaseRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def drop_database( - self, - database, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Drops (aka deletes) a Cloud Spanner database. Completed backups for - the database will be retained according to their ``expire_time``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> client.drop_database(database) - - Args: - database (str): Required. The database to be dropped. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "drop_database" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_database, - default_retry=self._method_configs["DropDatabase"].retry, - default_timeout=self._method_configs["DropDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.DropDatabaseRequest(database=database) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_database_ddl( - self, - database, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns the schema of a Cloud Spanner database as a list of - formatted DDL statements. This method does not show pending schema - updates, those may be queried using the ``Operations`` API. 
- - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.get_database_ddl(database) - - Args: - database (str): Required. The database whose schema we wish to get. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_database_ddl" not in self._inner_api_calls: - self._inner_api_calls[ - "get_database_ddl" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_database_ddl, - default_retry=self._method_configs["GetDatabaseDdl"].retry, - default_timeout=self._method_configs["GetDatabaseDdl"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.GetDatabaseDdlRequest(database=database) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_database_ddl"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a database or backup resource. - Replaces any existing policy. - - Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. For backups, authorization requires - ``spanner.backups.setIamPolicy`` permission on ``resource``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a database or backup resource. - Returns an empty policy if a database or backup exists but does not have - a policy set. - - Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. For backups, authorization requires - ``spanner.backups.getIamPolicy`` permission on ``resource``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.spanner_admin_database_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.spanner_admin_database_v1.types.Policy` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
-        if "get_iam_policy" not in self._inner_api_calls:
-            self._inner_api_calls[
-                "get_iam_policy"
-            ] = google.api_core.gapic_v1.method.wrap_method(
-                self.transport.get_iam_policy,
-                default_retry=self._method_configs["GetIamPolicy"].retry,
-                default_timeout=self._method_configs["GetIamPolicy"].timeout,
-                client_info=self._client_info,
-            )
-
-        request = iam_policy_pb2.GetIamPolicyRequest(
-            resource=resource, options=options_
-        )
-        if metadata is None:
-            metadata = []
-        metadata = list(metadata)
-        try:
-            routing_header = [("resource", resource)]
-        except AttributeError:
-            pass
-        else:
-            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
-                routing_header
-            )
-            metadata.append(routing_metadata)
-
-        return self._inner_api_calls["get_iam_policy"](
-            request, retry=retry, timeout=timeout, metadata=metadata
-        )
-
-    def test_iam_permissions(
-        self,
-        resource,
-        permissions,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Returns permissions that the caller has on the specified database or
-        backup resource.
-
-        Attempting this RPC on a non-existent Cloud Spanner database will result
-        in a NOT_FOUND error if the user has ``spanner.databases.list``
-        permission on the containing Cloud Spanner instance. Otherwise returns
-        an empty set of permissions. Calling this method on a backup that does
-        not exist will result in a NOT_FOUND error if the user has
-        ``spanner.backups.list`` permission on the containing instance.
-
-        Example:
-            >>> from google.cloud import spanner_admin_database_v1
-            >>>
-            >>> client = spanner_admin_database_v1.DatabaseAdminClient()
-            >>>
-            >>> # TODO: Initialize `resource`:
-            >>> resource = ''
-            >>>
-            >>> # TODO: Initialize `permissions`:
-            >>> permissions = []
-            >>>
-            >>> response = client.test_iam_permissions(resource, permissions)
-
-        Args:
-            resource (str): REQUIRED: The resource for which the policy detail is being requested.
-                See the operation documentation for the appropriate value for this field.
-            permissions (list[str]): The set of permissions to check for the ``resource``. Permissions
-                with wildcards (such as '*' or 'storage.*') are not allowed. For more
-                information see `IAM Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.spanner_admin_database_v1.types.TestIamPermissionsResponse` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
-        if "test_iam_permissions" not in self._inner_api_calls:
-            self._inner_api_calls[
-                "test_iam_permissions"
-            ] = google.api_core.gapic_v1.method.wrap_method(
-                self.transport.test_iam_permissions,
-                default_retry=self._method_configs["TestIamPermissions"].retry,
-                default_timeout=self._method_configs["TestIamPermissions"].timeout,
-                client_info=self._client_info,
-            )
-
-        request = iam_policy_pb2.TestIamPermissionsRequest(
-            resource=resource, permissions=permissions
-        )
-        if metadata is None:
-            metadata = []
-        metadata = list(metadata)
-        try:
-            routing_header = [("resource", resource)]
-        except AttributeError:
-            pass
-        else:
-            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
-                routing_header
-            )
-            metadata.append(routing_metadata)
-
-        return self._inner_api_calls["test_iam_permissions"](
-            request, retry=retry, timeout=timeout, metadata=metadata
-        )
-
-    def get_backup(
-        self,
-        name,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Gets metadata on a pending or completed ``Backup``.
-
-        Example:
-            >>> from google.cloud import spanner_admin_database_v1
-            >>>
-            >>> client = spanner_admin_database_v1.DatabaseAdminClient()
-            >>>
-            >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[BACKUP]')
-            >>>
-            >>> response = client.get_backup(name)
-
-        Args:
-            name (str): Required. Name of the backup. Values are of the form
-                ``projects/<project>/instances/<instance>/backups/<backup>``.
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
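The three IAM wrappers above collapse into generated methods on the v2.0.0 client that take a single ``request`` argument; a dict shaped like the request message is accepted by the microgenerated surface. A minimal sketch, with a hypothetical resource name:

    >>> from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient
    >>> client = DatabaseAdminClient()
    >>> resource = "projects/my-project/instances/my-instance/databases/my-database"
    >>> policy = client.get_iam_policy(request={"resource": resource})
    >>> policy = client.set_iam_policy(request={"resource": resource, "policy": policy})
    >>> response = client.test_iam_permissions(
    ...     request={"resource": resource, "permissions": ["spanner.databases.get"]}
    ... )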
- if "get_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "get_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_backup, - default_retry=self._method_configs["GetBackup"].retry, - default_timeout=self._method_configs["GetBackup"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.GetBackupRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_backup( - self, - backup, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a pending or completed ``Backup``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_backup(backup, update_mask) - - Args: - backup (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to - be updated as specified by ``update_mask`` are required. Other fields - are ignored. Update is only supported for the following fields: - - - ``backup.expire_time``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.Backup` - update_mask (Union[dict, ~google.cloud.spanner_admin_database_v1.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in - the Backup resource should be updated. This mask is relative to the - Backup resource, not to the request message. The field mask must always - be specified; this prevents any future fields from being erased - accidentally by clients that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "update_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_backup, - default_retry=self._method_configs["UpdateBackup"].retry, - default_timeout=self._method_configs["UpdateBackup"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.UpdateBackupRequest(backup=backup, update_mask=update_mask) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("backup.name", backup.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a pending or completed ``Backup``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[BACKUP]') - >>> - >>> client.delete_backup(name) - - Args: - name (str): Required. Name of the backup to delete. Values are of the form - ``projects//instances//backups/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_backup, - default_retry=self._method_configs["DeleteBackup"].retry, - default_timeout=self._method_configs["DeleteBackup"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.DeleteBackupRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_backups( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists completed and pending backups. Backups returned are ordered by - ``create_time`` in descending order, starting from the most recent - ``create_time``. 
-
-        Example:
-            >>> from google.cloud import spanner_admin_database_v1
-            >>>
-            >>> client = spanner_admin_database_v1.DatabaseAdminClient()
-            >>>
-            >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]')
-            >>>
-            >>> # Iterate over all results
-            >>> for element in client.list_backups(parent):
-            ...     # process element
-            ...     pass
-            >>>
-            >>>
-            >>> # Alternatively:
-            >>>
-            >>> # Iterate over results one page at a time
-            >>> for page in client.list_backups(parent).pages:
-            ...     for element in page:
-            ...         # process element
-            ...         pass
-
-        Args:
-            parent (str): Required. The instance to list backups from. Values are of the form
-                ``projects/<project>/instances/<instance>``.
-            filter_ (str): An expression that filters the list of returned backups.
-
-                A filter expression consists of a field name, a comparison operator, and
-                a value for filtering. The value must be a string, a number, or a
-                boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``,
-                ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator.
-                Filter rules are not case sensitive.
-
-                The following fields in the ``Backup`` are eligible for filtering:
-
-                -  ``name``
-                -  ``database``
-                -  ``state``
-                -  ``create_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
-                -  ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
-                -  ``size_bytes``
-
-                You can combine multiple expressions by enclosing each expression in
-                parentheses. By default, expressions are combined with AND logic, but
-                you can specify AND, OR, and NOT logic explicitly.
-
-                Here are a few examples:
-
-                -  ``name:Howl`` - The backup's name contains the string "howl".
-                -  ``database:prod`` - The database's name contains the string "prod".
-                -  ``state:CREATING`` - The backup is pending creation.
-                -  ``state:READY`` - The backup is fully created and ready for use.
-                -  ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`` - The
-                   backup name contains the string "howl" and ``create_time`` of the
-                   backup is before 2018-03-28T14:50:00Z.
-                -  ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
-                   ``expire_time`` is before 2018-03-28T14:50:00Z.
-                -  ``size_bytes > 10000000000`` - The backup's size is greater than 10GB
-            page_size (int): The maximum number of resources contained in the
-                underlying API response. If page streaming is performed per-
-                resource, this parameter does not affect the return value. If page
-                streaming is performed per-page, this determines the maximum number
-                of resources in a page.
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.api_core.page_iterator.PageIterator` instance.
-            An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instances.
-            You can also iterate over the pages of the response
-            using its `pages` property.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
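The v2.0.0 replacement for this method returns a pager rather than a ``PageIterator``; it is directly iterable and still exposes ``pages``. A minimal sketch, passing ``filter`` through a request dict since only ``parent`` is flattened (the parent name is hypothetical):

    >>> from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient
    >>> client = DatabaseAdminClient()
    >>> parent = "projects/my-project/instances/my-instance"
    >>> for backup in client.list_backups(request={"parent": parent, "filter": "state:READY"}):
    ...     print(backup.name)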
- if "list_backups" not in self._inner_api_calls: - self._inner_api_calls[ - "list_backups" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_backups, - default_retry=self._method_configs["ListBackups"].retry, - default_timeout=self._method_configs["ListBackups"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.ListBackupsRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_backups"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="backups", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def list_database_operations( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists database ``longrunning-operations``. A database operation has - a name of the form - ``projects//instances//databases//operations/``. - The long-running operation ``metadata`` field type ``metadata.type_url`` - describes the type of the metadata. Operations returned include those - that have completed/failed/canceled within the last 7 days, and pending - operations. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_database_operations(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_database_operations(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The instance of the database operations. Values are of the - form ``projects//instances/``. - filter_ (str): An expression that filters the list of returned operations. - - A filter expression consists of a field name, a comparison operator, and - a value for filtering. The value must be a string, a number, or a - boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``, - ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator. - Filter rules are not case sensitive. - - The following fields in the ``Operation`` are eligible for filtering: - - - ``name`` - The name of the long-running operation - - ``done`` - False if the operation is in progress, else true. - - ``metadata.@type`` - the type of metadata. For example, the type - string for ``RestoreDatabaseMetadata`` is - ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``. - - ``metadata.`` - any field in metadata.value. - - ``error`` - Error associated with the long-running operation. - - ``response.@type`` - the type of response. - - ``response.`` - any field in response.value. - - You can combine multiple expressions by enclosing each expression in - parentheses. By default, expressions are combined with AND logic. 
- However, you can specify AND, OR, and NOT logic explicitly. - - Here are a few examples: - - - ``done:true`` - The operation is complete. - - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND`` - ``(metadata.source_type:BACKUP) AND`` - ``(metadata.backup_info.backup:backup_howl) AND`` - ``(metadata.name:restored_howl) AND`` - ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`` - ``(error:*)`` - Return operations where: - - - The operation's metadata type is ``RestoreDatabaseMetadata``. - - The database is restored from a backup. - - The backup name contains "backup_howl". - - The restored database's name contains "restored_howl". - - The operation started before 2018-03-28T14:50:00Z. - - The operation resulted in an error. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Operation` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
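A sketch of the equivalent v2.0.0 call; the filter syntax documented above is unchanged, and the returned pager handles ``page_token`` bookkeeping itself (the parent name is hypothetical):

    >>> from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient
    >>> client = DatabaseAdminClient()
    >>> request = {"parent": "projects/my-project/instances/my-instance", "filter": "done:true"}
    >>> for operation in client.list_database_operations(request=request):
    ...     print(operation.name, operation.done)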
- if "list_database_operations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_database_operations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_database_operations, - default_retry=self._method_configs["ListDatabaseOperations"].retry, - default_timeout=self._method_configs["ListDatabaseOperations"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.ListDatabaseOperationsRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_database_operations"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="operations", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def list_backup_operations( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the backup ``long-running operations`` in the given instance. - A backup operation has a name of the form - ``projects//instances//backups//operations/``. - The long-running operation ``metadata`` field type ``metadata.type_url`` - describes the type of the metadata. Operations returned include those - that have completed/failed/canceled within the last 7 days, and pending - operations. Operations returned are ordered by - ``operation.metadata.value.progress.start_time`` in descending order - starting from the most recently started operation. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_backup_operations(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_backup_operations(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The instance of the backup operations. Values are of the - form ``projects//instances/``. - filter_ (str): An expression that filters the list of returned backup operations. - - A filter expression consists of a field name, a comparison operator, and - a value for filtering. The value must be a string, a number, or a - boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``, - ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator. - Filter rules are not case sensitive. - - The following fields in the ``operation`` are eligible for filtering: - - - ``name`` - The name of the long-running operation - - ``done`` - False if the operation is in progress, else true. - - ``metadata.@type`` - the type of metadata. For example, the type - string for ``CreateBackupMetadata`` is - ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``. - - ``metadata.`` - any field in metadata.value. - - ``error`` - Error associated with the long-running operation. 
- - ``response.@type`` - the type of response. - - ``response.`` - any field in response.value. - - You can combine multiple expressions by enclosing each expression in - parentheses. By default, expressions are combined with AND logic, but - you can specify AND, OR, and NOT logic explicitly. - - Here are a few examples: - - - ``done:true`` - The operation is complete. - - ``metadata.database:prod`` - The database the backup was taken from - has a name containing the string "prod". - - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND`` - ``(metadata.name:howl) AND`` - ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`` - ``(error:*)`` - Returns operations where: - - - The operation's metadata type is ``CreateBackupMetadata``. - - The backup name contains the string "howl". - - The operation started before 2018-03-28T14:50:00Z. - - The operation resulted in an error. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Operation` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
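Per-call ``retry`` and ``timeout`` overrides survive the migration, replacing the client-config defaults removed below. A minimal sketch of the v2.0.0 equivalent, assuming ``google.api_core.retry.Retry`` and a hypothetical parent:

    >>> from google.api_core.retry import Retry
    >>> from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient
    >>> client = DatabaseAdminClient()
    >>> pager = client.list_backup_operations(
    ...     request={"parent": "projects/my-project/instances/my-instance"},
    ...     retry=Retry(deadline=60),  # overall retry deadline, in seconds
    ...     timeout=30,  # per-attempt timeout, in seconds
    ... )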
- if "list_backup_operations" not in self._inner_api_calls: - self._inner_api_calls[ - "list_backup_operations" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_backup_operations, - default_retry=self._method_configs["ListBackupOperations"].retry, - default_timeout=self._method_configs["ListBackupOperations"].timeout, - client_info=self._client_info, - ) - - request = backup_pb2.ListBackupOperationsRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_backup_operations"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="operations", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py deleted file mode 100644 index 936fa54ef9..0000000000 --- a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py +++ /dev/null @@ -1,147 +0,0 @@ -config = { - "interfaces": { - "google.spanner.admin.database.v1.DatabaseAdmin": { - "retry_codes": { - "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_2_codes": [], - "no_retry_codes": [], - "retry_policy_2_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 30000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "no_retry_2_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 30000, - }, - }, - "methods": { - "CreateDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "UpdateDatabaseDdl": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "CreateBackup": { - "timeout_millis": 3600000, - 
"retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "RestoreDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "ListDatabases": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DropDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetDatabaseDdl": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "SetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "TestIamPermissions": { - "timeout_millis": 30000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetBackup": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "UpdateBackup": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteBackup": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListBackups": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListDatabaseOperations": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListBackupOperations": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - }, - } - } -} diff --git a/google/cloud/spanner_admin_database_v1/gapic/enums.py b/google/cloud/spanner_admin_database_v1/gapic/enums.py deleted file mode 100644 index 575cb3a8f7..0000000000 --- a/google/cloud/spanner_admin_database_v1/gapic/enums.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class RestoreSourceType(enum.IntEnum): - """ - Indicates the type of the restore source. - - Attributes: - TYPE_UNSPECIFIED (int): No restore associated. - BACKUP (int): A backup was used as the source of the restore. - """ - - TYPE_UNSPECIFIED = 0 - BACKUP = 1 - - -class Backup(object): - class State(enum.IntEnum): - """ - Indicates the current state of the backup. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. 
- CREATING (int): The pending backup is still being created. Operations on the backup - may fail with ``FAILED_PRECONDITION`` in this state. - READY (int): The backup is complete and ready for use. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - -class Database(object): - class State(enum.IntEnum): - """ - Indicates the current state of the database. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. - CREATING (int): The database is still being created. Operations on the database may - fail with ``FAILED_PRECONDITION`` in this state. - READY (int): The database is fully created and ready for use. - READY_OPTIMIZING (int): The database is fully created and ready for use, but is still being - optimized for performance and cannot handle full load. - - In this state, the database still references the backup it was restore - from, preventing the backup from being deleted. When optimizations are - complete, the full performance of the database will be restored, and the - database will transition to ``READY`` state. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - READY_OPTIMIZING = 3 diff --git a/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py b/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py deleted file mode 100644 index f2fb755668..0000000000 --- a/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2_grpc - - -class DatabaseAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.admin.database.v1 DatabaseAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.admin", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. 
- """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "database_admin_stub": spanner_database_admin_pb2_grpc.DatabaseAdminStub( - channel - ) - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.create_database`. - - Creates a new Cloud Spanner database and starts to prepare it for - serving. The returned ``long-running operation`` will have a name of the - format ``/operations/`` and can be used to - track preparation of the database. The ``metadata`` field type is - ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, - if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].CreateDatabase - - @property - def update_database_ddl(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.update_database_ddl`. - - Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - execution of the schema change(s). The ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].UpdateDatabaseDdl - - @property - def create_backup(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.create_backup`. - - Starts creating a new Cloud Spanner Backup. 
The returned backup - ``long-running operation`` will have a name of the format - ``projects//instances//backups//operations/`` - and can be used to track creation of the backup. The ``metadata`` field - type is ``CreateBackupMetadata``. The ``response`` field type is - ``Backup``, if successful. Cancelling the returned operation will stop - the creation and delete the backup. There can be only one pending backup - creation per database. Backup creation of different databases can run - concurrently. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].CreateBackup - - @property - def restore_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.restore_database`. - - Create a new database by restoring from a completed backup. The new - database must be in the same project and in an instance with the same - instance configuration as the instance containing the backup. The - returned database ``long-running operation`` has a name of the format - ``projects//instances//databases//operations/``, - and can be used to track the progress of the operation, and to cancel - it. The ``metadata`` field type is ``RestoreDatabaseMetadata``. The - ``response`` type is ``Database``, if successful. Cancelling the - returned operation will stop the restore and delete the database. There - can be only one database being restored into an instance at a time. Once - the restore operation completes, a new restore operation can be - initiated, without waiting for the optimize operation associated with - the first restore to complete. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].RestoreDatabase - - @property - def list_databases(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.list_databases`. - - Lists Cloud Spanner databases. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].ListDatabases - - @property - def get_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_database`. - - Gets the state of a Cloud Spanner database. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetDatabase - - @property - def drop_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.drop_database`. - - Drops (aka deletes) a Cloud Spanner database. Completed backups for - the database will be retained according to their ``expire_time``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].DropDatabase - - @property - def get_database_ddl(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_database_ddl`. - - Returns the schema of a Cloud Spanner database as a list of - formatted DDL statements. This method does not show pending schema - updates, those may be queried using the ``Operations`` API. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["database_admin_stub"].GetDatabaseDdl - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.set_iam_policy`. - - Sets the access control policy on a database or backup resource. - Replaces any existing policy. - - Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. For backups, authorization requires - ``spanner.backups.setIamPolicy`` permission on ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].SetIamPolicy - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_iam_policy`. - - Gets the access control policy for a database or backup resource. - Returns an empty policy if a database or backup exists but does not have - a policy set. - - Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. For backups, authorization requires - ``spanner.backups.getIamPolicy`` permission on ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified database or - backup resource. - - Attempting this RPC on a non-existent Cloud Spanner database will result - in a NOT_FOUND error if the user has ``spanner.databases.list`` - permission on the containing Cloud Spanner instance. Otherwise returns - an empty set of permissions. Calling this method on a backup that does - not exist will result in a NOT_FOUND error if the user has - ``spanner.backups.list`` permission on the containing instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].TestIamPermissions - - @property - def get_backup(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_backup`. - - Gets metadata on a pending or completed ``Backup``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetBackup - - @property - def update_backup(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.update_backup`. - - Updates a pending or completed ``Backup``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].UpdateBackup - - @property - def delete_backup(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.delete_backup`. - - Deletes a pending or completed ``Backup``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].DeleteBackup - - @property - def list_backups(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.list_backups`. - - Lists completed and pending backups. 
Backups returned are ordered by
-        ``create_time`` in descending order, starting from the most recent
-        ``create_time``.
-
-        Returns:
-            Callable: A callable which accepts the appropriate
-                deserialized request object and returns a
-                deserialized response object.
-        """
-        return self._stubs["database_admin_stub"].ListBackups
-
-    @property
-    def list_database_operations(self):
-        """Return the gRPC stub for :meth:`DatabaseAdminClient.list_database_operations`.
-
-        Lists database ``longrunning-operations``. A database operation has
-        a name of the form
-        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation>``.
-        The long-running operation ``metadata`` field type ``metadata.type_url``
-        describes the type of the metadata. Operations returned include those
-        that have completed/failed/canceled within the last 7 days, and pending
-        operations.
-
-        Returns:
-            Callable: A callable which accepts the appropriate
-                deserialized request object and returns a
-                deserialized response object.
-        """
-        return self._stubs["database_admin_stub"].ListDatabaseOperations
-
-    @property
-    def list_backup_operations(self):
-        """Return the gRPC stub for :meth:`DatabaseAdminClient.list_backup_operations`.
-
-        Lists the backup ``long-running operations`` in the given instance.
-        A backup operation has a name of the form
-        ``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>``.
-        The long-running operation ``metadata`` field type ``metadata.type_url``
-        describes the type of the metadata. Operations returned include those
-        that have completed/failed/canceled within the last 7 days, and pending
-        operations. Operations returned are ordered by
-        ``operation.metadata.value.progress.start_time`` in descending order
-        starting from the most recently started operation.
-
-        Returns:
-            Callable: A callable which accepts the appropriate
-                deserialized request object and returns a
-                deserialized response object.
-        """
-        return self._stubs["database_admin_stub"].ListBackupOperations
diff --git a/google/cloud/spanner_admin_database_v1/proto/__init__.py b/google/cloud/spanner_admin_database_v1/proto/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py b/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py
deleted file mode 100644
index 707412b7da..0000000000
--- a/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py
+++ /dev/null
@@ -1,1407 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
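The generated ``*_pb2`` descriptor modules deleted below are superseded in v2.0.0 by proto-plus message classes under ``google.cloud.spanner_admin_database_v1.types``, which are built with plain keyword arguments. A minimal sketch (resource names hypothetical):

    >>> from google.cloud.spanner_admin_database_v1.types import Backup, CreateBackupRequest
    >>> backup = Backup(database="projects/my-project/instances/my-instance/databases/my-database")
    >>> request = CreateBackupRequest(
    ...     parent="projects/my-project/instances/my-instance",
    ...     backup_id="my-backup",
    ...     backup=backup,
    ... )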
-# source: google/cloud/spanner_admin_database_v1/proto/backup.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.cloud.spanner_admin_database_v1.proto import ( - common_pb2 as google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_common__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_admin_database_v1/proto/backup.proto", - package="google.spanner.admin.database.v1", - syntax="proto3", - serialized_options=b"\n$com.google.spanner.admin.database.v1B\013BackupProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Google::Cloud::Spanner::Admin::Database::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n9google/cloud/spanner_admin_database_v1/proto/backup.proto\x12 google.spanner.admin.database.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x39google/cloud/spanner_admin_database_v1/proto/common.proto\x1a\x1cgoogle/api/annotations.proto"\xcd\x03\n\x06\x42\x61\x63kup\x12\x36\n\x08\x64\x61tabase\x18\x02 \x01(\tB$\xfa\x41!\n\x1fspanner.googleapis.com/Database\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x05 \x01(\x03\x42\x03\xe0\x41\x03\x12\x42\n\x05state\x18\x06 \x01(\x0e\x32..google.spanner.admin.database.v1.Backup.StateB\x03\xe0\x41\x03\x12"\n\x15referencing_databases\x18\x07 \x03(\tB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:\\\xea\x41Y\n\x1dspanner.googleapis.com/Backup\x12\x38projects/{project}/instances/{instance}/backups/{backup}"\xa5\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12=\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32(.google.spanner.admin.database.v1.BackupB\x03\xe0\x41\x02"\xae\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x02 \x01(\t\x12\x45\n\x08progress\x18\x03 \x01(\x0b\x32\x33.google.spanner.admin.database.v1.OperationProgress\x12/\n\x0b\x63\x61ncel_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x8a\x01\n\x13UpdateBackupRequest\x12=\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32(.google.spanner.admin.database.v1.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"G\n\x10GetBackupRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1dspanner.googleapis.com/Backup"J\n\x13\x44\x65leteBackupRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1dspanner.googleapis.com/Backup"\x84\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"i\n\x13ListBackupsResponse\x12\x39\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32(.google.spanner.admin.database.v1.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8d\x01\n\x1bListBackupOperationsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"j\n\x1cListBackupOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"f\n\nBackupInfo\x12\x0e\n\x06\x62\x61\x63kup\x18\x01 \x01(\t\x12/\n\x0b\x63reate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fsource_database\x18\x03 \x01(\tB\xff\x01\n$com.google.spanner.admin.database.v1B\x0b\x42\x61\x63kupProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1\xea\x02+Google::Cloud::Spanner::Admin::Database::V1b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_BACKUP_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.spanner.admin.database.v1.Backup.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=661, - serialized_end=716, -) -_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) - - -_BACKUP = _descriptor.Descriptor( - name="Backup", - full_name="google.spanner.admin.database.v1.Backup", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.spanner.admin.database.v1.Backup.database", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\372A!\n\037spanner.googleapis.com/Database", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.spanner.admin.database.v1.Backup.expire_time", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.database.v1.Backup.name", - index=2, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.spanner.admin.database.v1.Backup.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="size_bytes", - full_name="google.spanner.admin.database.v1.Backup.size_bytes", - index=4, - number=5, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.spanner.admin.database.v1.Backup.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="referencing_databases", - full_name="google.spanner.admin.database.v1.Backup.referencing_databases", - index=6, - number=7, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_BACKUP_STATE], - serialized_options=b"\352AY\n\035spanner.googleapis.com/Backup\0228projects/{project}/instances/{instance}/backups/{backup}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=349, - serialized_end=810, -) - - -_CREATEBACKUPREQUEST = _descriptor.Descriptor( - name="CreateBackupRequest", - full_name="google.spanner.admin.database.v1.CreateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.database.v1.CreateBackupRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037spanner.googleapis.com/Instance", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_id", - full_name="google.spanner.admin.database.v1.CreateBackupRequest.backup_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.spanner.admin.database.v1.CreateBackupRequest.backup", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=813, - serialized_end=978, -) - - -_CREATEBACKUPMETADATA = _descriptor.Descriptor( - name="CreateBackupMetadata", - full_name="google.spanner.admin.database.v1.CreateBackupMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.database.v1.CreateBackupMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="database", - full_name="google.spanner.admin.database.v1.CreateBackupMetadata.database", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.spanner.admin.database.v1.CreateBackupMetadata.progress", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cancel_time", - full_name="google.spanner.admin.database.v1.CreateBackupMetadata.cancel_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=981, - serialized_end=1155, -) - - -_UPDATEBACKUPREQUEST = _descriptor.Descriptor( - name="UpdateBackupRequest", - full_name="google.spanner.admin.database.v1.UpdateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.spanner.admin.database.v1.UpdateBackupRequest.backup", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.spanner.admin.database.v1.UpdateBackupRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1158, - serialized_end=1296, -) - - -_GETBACKUPREQUEST = _descriptor.Descriptor( - name="GetBackupRequest", - full_name="google.spanner.admin.database.v1.GetBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.database.v1.GetBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035spanner.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1298, - serialized_end=1369, -) - - -_DELETEBACKUPREQUEST = _descriptor.Descriptor( - name="DeleteBackupRequest", - full_name="google.spanner.admin.database.v1.DeleteBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.database.v1.DeleteBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035spanner.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1371, - serialized_end=1445, -) - - -_LISTBACKUPSREQUEST = 
_descriptor.Descriptor( - name="ListBackupsRequest", - full_name="google.spanner.admin.database.v1.ListBackupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.database.v1.ListBackupsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037spanner.googleapis.com/Instance", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.spanner.admin.database.v1.ListBackupsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.database.v1.ListBackupsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.spanner.admin.database.v1.ListBackupsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1448, - serialized_end=1580, -) - - -_LISTBACKUPSRESPONSE = _descriptor.Descriptor( - name="ListBackupsResponse", - full_name="google.spanner.admin.database.v1.ListBackupsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backups", - full_name="google.spanner.admin.database.v1.ListBackupsResponse.backups", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.database.v1.ListBackupsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1582, - serialized_end=1687, -) - - -_LISTBACKUPOPERATIONSREQUEST = _descriptor.Descriptor( - name="ListBackupOperationsRequest", - full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037spanner.googleapis.com/Instance", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1690, - serialized_end=1831, -) - - -_LISTBACKUPOPERATIONSRESPONSE = _descriptor.Descriptor( - name="ListBackupOperationsResponse", - full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="operations", - full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse.operations", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1833, - serialized_end=1939, -) - - -_BACKUPINFO = _descriptor.Descriptor( - name="BackupInfo", - full_name="google.spanner.admin.database.v1.BackupInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.spanner.admin.database.v1.BackupInfo.backup", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.spanner.admin.database.v1.BackupInfo.create_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_database", - full_name="google.spanner.admin.database.v1.BackupInfo.source_database", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1941, - serialized_end=2043, -) - -_BACKUP.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE -_BACKUP_STATE.containing_type = _BACKUP -_CREATEBACKUPREQUEST.fields_by_name["backup"].message_type = _BACKUP -_CREATEBACKUPMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_CREATEBACKUPMETADATA.fields_by_name[ - "cancel_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEBACKUPREQUEST.fields_by_name["backup"].message_type = _BACKUP -_UPDATEBACKUPREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTBACKUPSRESPONSE.fields_by_name["backups"].message_type = _BACKUP -_LISTBACKUPOPERATIONSRESPONSE.fields_by_name[ - "operations" -].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION -_BACKUPINFO.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP 
-DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA -DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST -DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE -DESCRIPTOR.message_types_by_name[ - "ListBackupOperationsRequest" -] = _LISTBACKUPOPERATIONSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListBackupOperationsResponse" -] = _LISTBACKUPOPERATIONSRESPONSE -DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Backup = _reflection.GeneratedProtocolMessageType( - "Backup", - (_message.Message,), - { - "DESCRIPTOR": _BACKUP, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """A backup of a Cloud Spanner database. - - Attributes: - database: - Required for the [CreateBackup][google.spanner.admin.database. - v1.DatabaseAdmin.CreateBackup] operation. Name of the database - from which this backup was created. This needs to be in the - same instance as the backup. Values are of the form ``projects - //instances//databases/``. - expire_time: - Required for the [CreateBackup][google.spanner.admin.database. - v1.DatabaseAdmin.CreateBackup] operation. The expiration time - of the backup, with microseconds granularity that must be at - least 6 hours and at most 366 days from the time the - CreateBackup request is processed. Once the ``expire_time`` - has passed, the backup is eligible to be automatically deleted - by Cloud Spanner to free the resources used by the backup. - name: - Output only for the [CreateBackup][google.spanner.admin.databa - se.v1.DatabaseAdmin.CreateBackup] operation. Required for the - [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin. - UpdateBackup] operation. A globally unique identifier for the - backup which cannot be changed. Values are of the form ``proje - cts//instances//backups/[a-z][a-z0-9_\-]*[a - -z0-9]`` The final segment of the name must be between 2 and - 60 characters in length. The backup is stored in the - location(s) specified in the instance configuration of the - instance containing the backup, identified by the prefix of - the backup name of the form - ``projects//instances/``. - create_time: - Output only. The backup will contain an externally consistent - copy of the database at the timestamp specified by - ``create_time``. ``create_time`` is approximately the time the - [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin. - CreateBackup] request is received. - size_bytes: - Output only. Size of the backup in bytes. - state: - Output only. The current state of the backup. - referencing_databases: - Output only. The names of the restored databases that - reference the backup. The database names are of the form ``pro - jects//instances//databases/``. - Referencing databases may exist in different instances. The - existence of any referencing database prevents the backup from - being deleted. When a restored database from the backup enters - the ``READY`` state, the reference to the backup is removed. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Backup) - }, -) -_sym_db.RegisterMessage(Backup) - -CreateBackupRequest = _reflection.GeneratedProtocolMessageType( - "CreateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for [CreateBackup][google.spanner.admin.database.v1.Databa - seAdmin.CreateBackup]. - - Attributes: - parent: - Required. The name of the instance in which the backup will be - created. This must be the same instance that contains the - database the backup will be created from. The backup will be - stored in the location(s) specified in the instance - configuration of this instance. Values are of the form - ``projects//instances/``. - backup_id: - Required. The id of the backup to be created. The - ``backup_id`` appended to ``parent`` forms the full backup - name of the form ``projects//instances//bac - kups/``. - backup: - Required. The backup to create. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupRequest) - }, -) -_sym_db.RegisterMessage(CreateBackupRequest) - -CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( - "CreateBackupMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPMETADATA, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """Metadata type for the operation returned by [CreateBackup][google.span - ner.admin.database.v1.DatabaseAdmin.CreateBackup]. - - Attributes: - name: - The name of the backup being created. - database: - The name of the database the backup is created from. - progress: - The progress of the [CreateBackup][google.spanner.admin.databa - se.v1.DatabaseAdmin.CreateBackup] operation. - cancel_time: - The time at which cancellation of this operation was received. - [Operations.CancelOperation][google.longrunning.Operations.Can - celOperation] starts asynchronous cancellation on a long- - running operation. The server makes a best effort to cancel - the operation, but success is not guaranteed. Clients can use - [Operations.GetOperation][google.longrunning.Operations.GetOpe - ration] or other methods to check whether the cancellation - succeeded or whether the operation completed despite - cancellation. On successful cancellation, the operation is not - deleted; instead, it becomes an operation with an - [Operation.error][] value with a - [google.rpc.Status.code][google.rpc.Status.code] of 1, - corresponding to ``Code.CANCELLED``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupMetadata) - }, -) -_sym_db.RegisterMessage(CreateBackupMetadata) - -UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( - "UpdateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEBACKUPREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for [UpdateBackup][google.spanner.admin.database.v1.Databa - seAdmin.UpdateBackup]. - - Attributes: - backup: - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only supported - for the following fields: \* ``backup.expire_time``. - update_mask: - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. 
- This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; this - prevents any future fields from being erased accidentally by - clients that do not know about them. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateBackupRequest) - }, -) -_sym_db.RegisterMessage(UpdateBackupRequest) - -GetBackupRequest = _reflection.GeneratedProtocolMessageType( - "GetBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETBACKUPREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for - [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. - - Attributes: - name: - Required. Name of the backup. Values are of the form - ``projects//instances//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetBackupRequest) - }, -) -_sym_db.RegisterMessage(GetBackupRequest) - -DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( - "DeleteBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEBACKUPREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for [DeleteBackup][google.spanner.admin.database.v1.Databa - seAdmin.DeleteBackup]. - - Attributes: - name: - Required. Name of the backup to delete. Values are of the form - ``projects//instances//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DeleteBackupRequest) - }, -) -_sym_db.RegisterMessage(DeleteBackupRequest) - -ListBackupsRequest = _reflection.GeneratedProtocolMessageType( - "ListBackupsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for [ListBackups][google.spanner.admin.database.v1.Databas - eAdmin.ListBackups]. - - Attributes: - parent: - Required. The instance to list backups from. Values are of the - form ``projects//instances/``. - filter: - An expression that filters the list of returned backups. A - filter expression consists of a field name, a comparison - operator, and a value for filtering. The value must be a - string, a number, or a boolean. The comparison operator must - be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or - ``:``. Colon ``:`` is the contains operator. Filter rules are - not case sensitive. The following fields in the - [Backup][google.spanner.admin.database.v1.Backup] are eligible - for filtering: - ``name`` - ``database`` - ``state`` - - ``create_time`` (and values are of the format YYYY-MM- - DDTHH:MM:SSZ) - ``expire_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - ``size_bytes`` You can combine - multiple expressions by enclosing each expression in - parentheses. By default, expressions are combined with AND - logic, but you can specify AND, OR, and NOT logic explicitly. - Here are a few examples: - ``name:Howl`` - The backup’s name - contains the string “howl”. - ``database:prod`` - The - database’s name contains the string “prod”. - - ``state:CREATING`` - The backup is pending creation. - - ``state:READY`` - The backup is fully created and ready for - use. - ``(name:howl) AND (create_time < - \"2018-03-28T14:50:00Z\")`` - The backup name contains the - string “howl” and ``create_time`` of the backup is before - 2018-03-28T14:50:00Z. - ``expire_time < - \"2018-03-28T14:50:00Z\"`` - The backup ``expire_time`` is - before 2018-03-28T14:50:00Z. 
- ``size_bytes > 10000000000`` - - The backup’s size is greater than 10GB - page_size: - Number of backups to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next_page_token - ][google.spanner.admin.database.v1.ListBackupsResponse.next_pa - ge_token] from a previous [ListBackupsResponse][google.spanner - .admin.database.v1.ListBackupsResponse] to the same ``parent`` - and with the same ``filter``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsRequest) - }, -) -_sym_db.RegisterMessage(ListBackupsRequest) - -ListBackupsResponse = _reflection.GeneratedProtocolMessageType( - "ListBackupsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSRESPONSE, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The response for [ListBackups][google.spanner.admin.database.v1.Databa - seAdmin.ListBackups]. - - Attributes: - backups: - The list of matching backups. Backups returned are ordered by - ``create_time`` in descending order, starting from the most - recent ``create_time``. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListBackups - ][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] - call to fetch more of the matching backups. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsResponse) - }, -) -_sym_db.RegisterMessage(ListBackupsResponse) - -ListBackupOperationsRequest = _reflection.GeneratedProtocolMessageType( - "ListBackupOperationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPOPERATIONSREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The request for [ListBackupOperations][google.spanner.admin.database.v - 1.DatabaseAdmin.ListBackupOperations]. - - Attributes: - parent: - Required. The instance of the backup operations. Values are of - the form ``projects//instances/``. - filter: - An expression that filters the list of returned backup - operations. A filter expression consists of a field name, a - comparison operator, and a value for filtering. The value must - be a string, a number, or a boolean. The comparison operator - must be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, - or ``:``. Colon ``:`` is the contains operator. Filter rules - are not case sensitive. The following fields in the - [operation][google.longrunning.Operation] are eligible for - filtering: - ``name`` - The name of the long-running - operation - ``done`` - False if the operation is in progress, - else true. - ``metadata.@type`` - the type of metadata. For - example, the type string for [CreateBackupMetadata][goog - le.spanner.admin.database.v1.CreateBackupMetadata] is `` - type.googleapis.com/google.spanner.admin.database.v1.CreateBac - kupMetadata``. - ``metadata.`` - any field in - metadata.value. - ``error`` - Error associated with the long- - running operation. - ``response.@type`` - the type of - response. - ``response.`` - any field in - response.value. You can combine multiple expressions by - enclosing each expression in parentheses. By default, - expressions are combined with AND logic, but you can specify - AND, OR, and NOT logic explicitly. Here are a few examples: - - ``done:true`` - The operation is complete. - - ``metadata.database:prod`` - The database the backup was taken - from has a name containing the string “prod”. 
- ``(metadat - a.@type=type.googleapis.com/google.spanner.admin.database.v1.C - reateBackupMetadata) AND`` ``(metadata.name:howl) AND`` - ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") - AND`` ``(error:*)`` - Returns operations where: - The - operation’s metadata type is [CreateBackupMetadata][goog - le.spanner.admin.database.v1.CreateBackupMetadata]. - The - backup name contains the string “howl”. - The operation - started before 2018-03-28T14:50:00Z. - The operation - resulted in an error. - page_size: - Number of operations to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next_page_token - ][google.spanner.admin.database.v1.ListBackupOperationsRespons - e.next_page_token] from a previous [ListBackupOperationsRespon - se][google.spanner.admin.database.v1.ListBackupOperationsRespo - nse] to the same ``parent`` and with the same ``filter``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsRequest) - }, -) -_sym_db.RegisterMessage(ListBackupOperationsRequest) - -ListBackupOperationsResponse = _reflection.GeneratedProtocolMessageType( - "ListBackupOperationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPOPERATIONSRESPONSE, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """The response for [ListBackupOperations][google.spanner.admin.database. - v1.DatabaseAdmin.ListBackupOperations]. - - Attributes: - operations: - The list of matching backup [long-running - operations][google.longrunning.Operation]. Each operation’s - name will be prefixed by the backup’s name and the operation’s - [metadata][google.longrunning.Operation.metadata] will be of - type [CreateBackupMetadata][google.spanner.admin.database.v1.C - reateBackupMetadata]. Operations returned include those that - are pending or have completed/failed/canceled within the last - 7 days. Operations returned are ordered by - ``operation.metadata.value.progress.start_time`` in descending - order starting from the most recently started operation. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListBackupO - perations][google.spanner.admin.database.v1.DatabaseAdmin.List - BackupOperations] call to fetch more of the matching metadata. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsResponse) - }, -) -_sym_db.RegisterMessage(ListBackupOperationsResponse) - -BackupInfo = _reflection.GeneratedProtocolMessageType( - "BackupInfo", - (_message.Message,), - { - "DESCRIPTOR": _BACKUPINFO, - "__module__": "google.cloud.spanner_admin_database_v1.proto.backup_pb2", - "__doc__": """Information about a backup. - - Attributes: - backup: - Name of the backup. - create_time: - The backup contains an externally consistent copy of - ``source_database`` at the timestamp specified by - ``create_time``. - source_database: - Name of the database the backup was created from. 
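Editor's note: the docstrings above describe semantics that carry over unchanged to the microgenerated v2 client this PR introduces. A minimal sketch (not part of this patch) of how the same backup surface is reached through that client; the project, instance, database and backup IDs are hypothetical, and the expire_time window and filter syntax follow the deleted docstrings:

    import datetime

    from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient

    client = DatabaseAdminClient()
    parent = "projects/my-project/instances/my-instance"  # hypothetical IDs

    # CreateBackup is a long-running operation; expire_time must fall between
    # 6 hours and 366 days after the request is processed.
    operation = client.create_backup(
        parent=parent,
        backup_id="my-backup",
        backup={
            "database": parent + "/databases/my-database",
            "expire_time": datetime.datetime.now(datetime.timezone.utc)
            + datetime.timedelta(days=7),
        },
    )
    backup = operation.result()  # blocks until the backup reaches READY

    # UpdateBackup only supports changing expire_time; update_mask is required.
    backup = client.update_backup(
        backup={
            "name": backup.name,
            "expire_time": backup.expire_time + datetime.timedelta(days=30),
        },
        update_mask={"paths": ["expire_time"]},
    )

    # filter is not a flattened argument, so it travels in the request; the
    # expression syntax is the one documented in ListBackupsRequest above.
    for b in client.list_backups(request={"parent": parent, "filter": "state:READY"}):
        print(b.name, b.size_bytes)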
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupInfo) - }, -) -_sym_db.RegisterMessage(BackupInfo) - - -DESCRIPTOR._options = None -_BACKUP.fields_by_name["database"]._options = None -_BACKUP.fields_by_name["create_time"]._options = None -_BACKUP.fields_by_name["size_bytes"]._options = None -_BACKUP.fields_by_name["state"]._options = None -_BACKUP.fields_by_name["referencing_databases"]._options = None -_BACKUP._options = None -_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None -_GETBACKUPREQUEST.fields_by_name["name"]._options = None -_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None -_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None -_LISTBACKUPOPERATIONSREQUEST.fields_by_name["parent"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_admin_database_v1/proto/common_pb2.py b/google/cloud/spanner_admin_database_v1/proto/common_pb2.py deleted file mode 100644 index b4e89476eb..0000000000 --- a/google/cloud/spanner_admin_database_v1/proto/common_pb2.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/spanner_admin_database_v1/proto/common.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_admin_database_v1/proto/common.proto", - package="google.spanner.admin.database.v1", - syntax="proto3", - serialized_options=b"\n$com.google.spanner.admin.database.v1B\013CommonProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Google::Cloud::Spanner::Admin::Database::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n9google/cloud/spanner_admin_database_v1/proto/common.proto\x12 google.spanner.admin.database.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\xff\x01\n$com.google.spanner.admin.database.v1B\x0b\x43ommonProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1\xea\x02+Google::Cloud::Spanner::Admin::Database::V1b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_OPERATIONPROGRESS = _descriptor.Descriptor( - name="OperationProgress", - full_name="google.spanner.admin.database.v1.OperationProgress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.spanner.admin.database.v1.OperationProgress.progress_percent", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.spanner.admin.database.v1.OperationProgress.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.spanner.admin.database.v1.OperationProgress.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=192, - serialized_end=331, -) - -_OPERATIONPROGRESS.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONPROGRESS.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OperationProgress = _reflection.GeneratedProtocolMessageType( - "OperationProgress", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONPROGRESS, - "__module__": "google.cloud.spanner_admin_database_v1.proto.common_pb2", - "__doc__": """Encapsulates progress related information for a Cloud Spanner long - running operation. - - Attributes: - progress_percent: - Percent completion of the operation. Values are between 0 and - 100 inclusive. - start_time: - Time the request was received. - end_time: - If set, the time at which this operation failed or was - completed successfully. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OperationProgress) - }, -) -_sym_db.RegisterMessage(OperationProgress) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py deleted file mode 100644 index f0accdbecd..0000000000 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py +++ /dev/null @@ -1,2145 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
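Editor's note: a short sketch (not part of this patch) of how the OperationProgress message documented in the deleted common_pb2.py above surfaces in v2 as long-running-operation metadata; `operation` is assumed to come from an earlier create_backup call like the one sketched previously:

    metadata = operation.metadata    # a CreateBackupMetadata message
    progress = metadata.progress     # the OperationProgress described above
    print(progress.progress_percent)  # between 0 and 100 inclusive
    print(progress.start_time)        # time the request was received
    # progress.end_time is populated once the operation completes or fails.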
-# source: google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.cloud.spanner_admin_database_v1.proto import ( - backup_pb2 as google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2, -) -from google.cloud.spanner_admin_database_v1.proto import ( - common_pb2 as google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_common__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto", - package="google.spanner.admin.database.v1", - syntax="proto3", - serialized_options=b"\n$com.google.spanner.admin.database.v1B\031SpannerDatabaseAdminProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Google::Cloud::Spanner::Admin::Database::V1\352AJ\n\037spanner.googleapis.com/Instance\022'projects/{project}/instances/{instance}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\nIgoogle/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto\x12 google.spanner.admin.database.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x39google/cloud/spanner_admin_database_v1/proto/backup.proto\x1a\x39google/cloud/spanner_admin_database_v1/proto/common.proto"\xab\x01\n\x0bRestoreInfo\x12H\n\x0bsource_type\x18\x01 \x01(\x0e\x32\x33.google.spanner.admin.database.v1.RestoreSourceType\x12\x43\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32,.google.spanner.admin.database.v1.BackupInfoH\x00\x42\r\n\x0bsource_info"\x96\x03\n\x08\x44\x61tabase\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x44\n\x05state\x18\x02 \x01(\x0e\x32\x30.google.spanner.admin.database.v1.Database.StateB\x03\xe0\x41\x03\x12\x34\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x0crestore_info\x18\x04 
\x01(\x0b\x32-.google.spanner.admin.database.v1.RestoreInfoB\x03\xe0\x41\x03"M\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02\x12\x14\n\x10READY_OPTIMIZING\x10\x03:b\xea\x41_\n\x1fspanner.googleapis.com/Database\x12\x82\xd3\xe4\x93\x02/\x12-/v1/{parent=projects/*/instances/*}/databases\xda\x41\x06parent\x12\xa4\x02\n\x0e\x43reateDatabase\x12\x37.google.spanner.admin.database.v1.CreateDatabaseRequest\x1a\x1d.google.longrunning.Operation"\xb9\x01\x82\xd3\xe4\x93\x02\x32"-/v1/{parent=projects/*/instances/*}/databases:\x01*\xda\x41\x17parent,create_statement\xca\x41\x64\n)google.spanner.admin.database.v1.Database\x12\x37google.spanner.admin.database.v1.CreateDatabaseMetadata\x12\xad\x01\n\x0bGetDatabase\x12\x34.google.spanner.admin.database.v1.GetDatabaseRequest\x1a*.google.spanner.admin.database.v1.Database"<\x82\xd3\xe4\x93\x02/\x12-/v1/{name=projects/*/instances/*/databases/*}\xda\x41\x04name\x12\x9d\x02\n\x11UpdateDatabaseDdl\x12:.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest\x1a\x1d.google.longrunning.Operation"\xac\x01\x82\xd3\xe4\x93\x02:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\x01*\xda\x41\x13\x64\x61tabase,statements\xca\x41S\n\x15google.protobuf.Empty\x12:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata\x12\xa3\x01\n\x0c\x44ropDatabase\x12\x35.google.spanner.admin.database.v1.DropDatabaseRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02\x33*1/v1/{database=projects/*/instances/*/databases/*}\xda\x41\x08\x64\x61tabase\x12\xcd\x01\n\x0eGetDatabaseDdl\x12\x37.google.spanner.admin.database.v1.GetDatabaseDdlRequest\x1a\x38.google.spanner.admin.database.v1.GetDatabaseDdlResponse"H\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{database=projects/*/instances/*/databases/*}/ddl\xda\x41\x08\x64\x61tabase\x12\xeb\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\x9f\x01\x82\xd3\xe4\x93\x02\x86\x01">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\x01*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\x01*ZA".google.spanner.admin.database.v1.ListBackupOperationsResponse"E\x82\xd3\xe4\x93\x02\x36\x12\x34/v1/{parent=projects/*/instances/*}/backupOperations\xda\x41\x06parent\x1ax\xca\x41\x16spanner.googleapis.com\xd2\x41\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.adminB\xda\x02\n$com.google.spanner.admin.database.v1B\x19SpannerDatabaseAdminProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1\xea\x02+Google::Cloud::Spanner::Admin::Database::V1\xea\x41J\n\x1fspanner.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_common__pb2.DESCRIPTOR, - ], -) - -_RESTORESOURCETYPE = _descriptor.EnumDescriptor( - 
name="RestoreSourceType", - full_name="google.spanner.admin.database.v1.RestoreSourceType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BACKUP", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3044, - serialized_end=3097, -) -_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) - -RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) -TYPE_UNSPECIFIED = 0 -BACKUP = 1 - - -_DATABASE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.spanner.admin.database.v1.Database.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY_OPTIMIZING", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=907, - serialized_end=984, -) -_sym_db.RegisterEnumDescriptor(_DATABASE_STATE) - - -_RESTOREINFO = _descriptor.Descriptor( - name="RestoreInfo", - full_name="google.spanner.admin.database.v1.RestoreInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.spanner.admin.database.v1.RestoreInfo.source_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.spanner.admin.database.v1.RestoreInfo.backup_info", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.spanner.admin.database.v1.RestoreInfo.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=504, - serialized_end=675, -) - - -_DATABASE = _descriptor.Descriptor( - name="Database", - full_name="google.spanner.admin.database.v1.Database", - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.database.v1.Database.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.spanner.admin.database.v1.Database.state", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.spanner.admin.database.v1.Database.create_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="restore_info", - full_name="google.spanner.admin.database.v1.Database.restore_info", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DATABASE_STATE], - serialized_options=b"\352A_\n\037spanner.googleapis.com/Database\022/instances/<instance>/databases/<database>``, - where ``<database>`` is as specified in the ``CREATE - DATABASE`` statement. This name can be passed to other API - methods to identify the database. - state: - Output only. The current database state. - create_time: - Output only. If exists, the time at which the database - creation started. - restore_info: - Output only. Applicable only for restored databases. Contains - information about the restore source. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Database) - }, -) -_sym_db.RegisterMessage(Database) - -ListDatabasesRequest = _reflection.GeneratedProtocolMessageType( - "ListDatabasesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATABASESREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [ListDatabases][google.spanner.admin.database.v1.Datab - aseAdmin.ListDatabases]. - - Attributes: - parent: - Required. The instance whose databases should be listed. - Values are of the form - ``projects/<project>/instances/<instance>``. - page_size: - Number of databases to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.database.v1.ListDatabasesResponse.nex - t\_page\_token] from a previous [ListDatabasesResponse][google - .spanner.admin.database.v1.ListDatabasesResponse].
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesRequest) - }, -) -_sym_db.RegisterMessage(ListDatabasesRequest) - -ListDatabasesResponse = _reflection.GeneratedProtocolMessageType( - "ListDatabasesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATABASESRESPONSE, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The response for [ListDatabases][google.spanner.admin.database.v1.Data - baseAdmin.ListDatabases]. - - Attributes: - databases: - Databases that matched the request. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListDatabas - es][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabas - es] call to fetch more of the matching databases. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesResponse) - }, -) -_sym_db.RegisterMessage(ListDatabasesResponse) - -CreateDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "CreateDatabaseRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEDATABASEREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [CreateDatabase][google.spanner.admin.database.v1.Data - baseAdmin.CreateDatabase]. - - Attributes: - parent: - Required. The name of the instance that will serve the new - database. Values are of the form - ``projects//instances/``. - create_statement: - Required. A ``CREATE DATABASE`` statement, which specifies the - ID of the new database. The database ID must conform to the - regular expression ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be - between 2 and 30 characters in length. If the database ID is a - reserved word or if it contains a hyphen, the database ID must - be enclosed in backticks (:literal:`\``). - extra_statements: - Optional. A list of DDL statements to run inside the newly - created database. Statements can create tables, indexes, etc. - These statements execute atomically with the creation of the - database: if there is an error in any statement, the database - is not created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseRequest) - }, -) -_sym_db.RegisterMessage(CreateDatabaseRequest) - -CreateDatabaseMetadata = _reflection.GeneratedProtocolMessageType( - "CreateDatabaseMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEDATABASEMETADATA, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """Metadata type for the operation returned by [CreateDatabase][google.sp - anner.admin.database.v1.DatabaseAdmin.CreateDatabase]. - - Attributes: - database: - The database being created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseMetadata) - }, -) -_sym_db.RegisterMessage(CreateDatabaseMetadata) - -GetDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "GetDatabaseRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETDATABASEREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [GetDatabase][google.spanner.admin.database.v1.Databas - eAdmin.GetDatabase]. - - Attributes: - name: - Required. The name of the requested database. Values are of - the form ``projects//instances//databases/< - database>``. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseRequest) - }, -) -_sym_db.RegisterMessage(GetDatabaseRequest) - -UpdateDatabaseDdlRequest = _reflection.GeneratedProtocolMessageType( - "UpdateDatabaseDdlRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEDATABASEDDLREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """Enqueues the given DDL statements to be applied, in order but not - necessarily all at once, to the database schema at some point (or - points) in the future. The server checks that the statements are - executable (syntactically valid, name tables that exist, etc.) before - enqueueing them, but they may still fail upon later execution (e.g., - if a statement from another batch of statements is applied first and - it conflicts in some way, or if there is some data-related problem - like a ``NULL`` value in a column to which ``NOT NULL`` would be - added). If a statement fails, all subsequent statements in the batch - are automatically cancelled. Each batch of statements is assigned a - name which can be used with the - [Operations][google.longrunning.Operations] API to monitor progress. - See the [operation_id][google.spanner.admin.database.v1.UpdateDatabase - DdlRequest.operation_id] field for more details. - - Attributes: - database: - Required. The database to update. - statements: - Required. DDL statements to be applied to the database. - operation_id: - If empty, the new update request is assigned an automatically- - generated operation ID. Otherwise, ``operation_id`` is used to - construct the name of the resulting - [Operation][google.longrunning.Operation]. Specifying an - explicit operation ID simplifies determining whether the - statements were executed in the event that the [UpdateDatabase - Ddl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateData - baseDdl] call is replayed, or the return value is otherwise - lost: the [database][google.spanner.admin.database.v1.UpdateDa - tabaseDdlRequest.database] and ``operation_id`` fields can be - combined to form the [name][google.longrunning.Operation.name] - of the resulting - [longrunning.Operation][google.longrunning.Operation]: - ``/operations/``. ``operation_id`` - should be unique within the database, and must be a valid - identifier: ``[a-z][a-z0-9_]*``. Note that automatically- - generated operation IDs always begin with an underscore. If - the named operation already exists, [UpdateDatabaseDdl][google - .spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - returns ``ALREADY_EXISTS``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) - }, -) -_sym_db.RegisterMessage(UpdateDatabaseDdlRequest) - -UpdateDatabaseDdlMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateDatabaseDdlMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEDATABASEDDLMETADATA, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """Metadata type for the operation returned by [UpdateDatabaseDdl][google - .spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. - - Attributes: - database: - The database being modified. - statements: - For an update this list contains all the statements. For an - individual statement, this list contains only that statement. 
- commit_timestamps: - Reports the commit timestamps of all statements that have - succeeded so far, where ``commit_timestamps[i]`` is the commit - timestamp for the statement ``statements[i]``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) - }, -) -_sym_db.RegisterMessage(UpdateDatabaseDdlMetadata) - -DropDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "DropDatabaseRequest", - (_message.Message,), - { - "DESCRIPTOR": _DROPDATABASEREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [DropDatabase][google.spanner.admin.database.v1.Databa - seAdmin.DropDatabase]. - - Attributes: - database: - Required. The database to be dropped. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DropDatabaseRequest) - }, -) -_sym_db.RegisterMessage(DropDatabaseRequest) - -GetDatabaseDdlRequest = _reflection.GeneratedProtocolMessageType( - "GetDatabaseDdlRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETDATABASEDDLREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [GetDatabaseDdl][google.spanner.admin.database.v1.Data - baseAdmin.GetDatabaseDdl]. - - Attributes: - database: - Required. The database whose schema we wish to get. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlRequest) - }, -) -_sym_db.RegisterMessage(GetDatabaseDdlRequest) - -GetDatabaseDdlResponse = _reflection.GeneratedProtocolMessageType( - "GetDatabaseDdlResponse", - (_message.Message,), - { - "DESCRIPTOR": _GETDATABASEDDLRESPONSE, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The response for [GetDatabaseDdl][google.spanner.admin.database.v1.Dat - abaseAdmin.GetDatabaseDdl]. - - Attributes: - statements: - A list of formatted DDL statements defining the schema of the - database specified in the request. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlResponse) - }, -) -_sym_db.RegisterMessage(GetDatabaseDdlResponse) - -ListDatabaseOperationsRequest = _reflection.GeneratedProtocolMessageType( - "ListDatabaseOperationsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATABASEOPERATIONSREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [ListDatabaseOperations][google.spanner.admin.database - .v1.DatabaseAdmin.ListDatabaseOperations]. - - Attributes: - parent: - Required. The instance of the database operations. Values are - of the form ``projects//instances/``. - filter: - An expression that filters the list of returned operations. A - filter expression consists of a field name, a comparison - operator, and a value for filtering. The value must be a - string, a number, or a boolean. The comparison operator must - be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or - ``:``. Colon ``:`` is the contains operator. Filter rules are - not case sensitive. The following fields in the - [Operation][google.longrunning.Operation] are eligible for - filtering: - ``name`` - The name of the long-running - operation - ``done`` - False if the operation is in progress, - else true. - ``metadata.@type`` - the type of metadata. 
For - example, the type string for [RestoreDatabaseMetadata][g - oogle.spanner.admin.database.v1.RestoreDatabaseMetadata] is - ``type.googleapis.com/google.spanner.admin.database.v1.Restore - DatabaseMetadata``. - ``metadata.`` - any field - in metadata.value. - ``error`` - Error associated with the - long-running operation. - ``response.@type`` - the type of - response. - ``response.`` - any field in - response.value. You can combine multiple expressions by - enclosing each expression in parentheses. By default, - expressions are combined with AND logic. However, you can - specify AND, OR, and NOT logic explicitly. Here are a few - examples: - ``done:true`` - The operation is complete. - `` - (metadata.@type=type.googleapis.com/google.spanner.admin.datab - ase.v1.RestoreDatabaseMetadata) AND`` - ``(metadata.source_type:BACKUP) AND`` - ``(metadata.backup_info.backup:backup_howl) AND`` - ``(metadata.name:restored_howl) AND`` - ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") - AND`` ``(error:*)`` - Return operations where: - The - operation’s metadata type is [RestoreDatabaseMetadata][g - oogle.spanner.admin.database.v1.RestoreDatabaseMetadata]. - - The database is restored from a backup. - The backup name - contains “backup_howl”. - The restored database’s name - contains “restored_howl”. - The operation started before - 2018-03-28T14:50:00Z. - The operation resulted in an - error. - page_size: - Number of operations to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.database.v1.ListDatabaseOperationsRes - ponse.next\_page\_token] from a previous [ListDatabaseOperatio - nsResponse][google.spanner.admin.database.v1.ListDatabaseOpera - tionsResponse] to the same ``parent`` and with the same - ``filter``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) - }, -) -_sym_db.RegisterMessage(ListDatabaseOperationsRequest) - -ListDatabaseOperationsResponse = _reflection.GeneratedProtocolMessageType( - "ListDatabaseOperationsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTDATABASEOPERATIONSRESPONSE, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The response for [ListDatabaseOperations][google.spanner.admin.databas - e.v1.DatabaseAdmin.ListDatabaseOperations]. - - Attributes: - operations: - The list of matching database [long-running - operations][google.longrunning.Operation]. Each operation’s - name will be prefixed by the database’s name. The operation’s - [metadata][google.longrunning.Operation.metadata] field type - ``metadata.type_url`` describes the type of the metadata. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListDatabas - eOperations][google.spanner.admin.database.v1.DatabaseAdmin.Li - stDatabaseOperations] call to fetch more of the matching - metadata. 
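The filter grammar spelled out above composes with the pager in the obvious way. A sketch with placeholder names; putting the filter on the request object rather than a flattened argument is an assumption about the generated v2 surface:

```python
# Sketch only; parent is a placeholder.
from google.cloud.spanner_admin_database_v1 import (
    DatabaseAdminClient,
    ListDatabaseOperationsRequest,
)

client = DatabaseAdminClient()
request = ListDatabaseOperationsRequest(
    parent="projects/my-project/instances/my-instance",
    # Completed restore operations, per the grammar documented above.
    filter=(
        "(metadata.@type=type.googleapis.com/"
        "google.spanner.admin.database.v1.RestoreDatabaseMetadata) "
        "AND (done:true)"
    ),
)
for operation in client.list_database_operations(request=request):
    print(operation.name, operation.done)
```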
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) - }, -) -_sym_db.RegisterMessage(ListDatabaseOperationsResponse) - -RestoreDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "RestoreDatabaseRequest", - (_message.Message,), - { - "DESCRIPTOR": _RESTOREDATABASEREQUEST, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """The request for [RestoreDatabase][google.spanner.admin.database.v1.Dat - abaseAdmin.RestoreDatabase]. - - Attributes: - parent: - Required. The name of the instance in which to create the - restored database. This instance must be in the same project - and have the same instance configuration as the instance - containing the source backup. Values are of the form - ``projects//instances/``. - database_id: - Required. The id of the database to create and restore to. - This database must not already exist. The ``database_id`` - appended to ``parent`` forms the full database name of the - form ``projects//instances//databases/``. - source: - Required. The source from which to restore. - backup: - Name of the backup from which to restore. Values are of the - form - ``projects//instances//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseRequest) - }, -) -_sym_db.RegisterMessage(RestoreDatabaseRequest) - -RestoreDatabaseMetadata = _reflection.GeneratedProtocolMessageType( - "RestoreDatabaseMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESTOREDATABASEMETADATA, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """Metadata type for the long-running operation returned by [RestoreDatab - ase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. - - Attributes: - name: - Name of the database being created and restored to. - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the database, as - specified by ``source`` in [RestoreDatabaseRequest][google.spa - nner.admin.database.v1.RestoreDatabaseRequest]. - backup_info: - Information about the backup used to restore the database. - progress: - The progress of the [RestoreDatabase][google.spanner.admin.dat - abase.v1.DatabaseAdmin.RestoreDatabase] operation. - cancel_time: - The time at which cancellation of this operation was received. - [Operations.CancelOperation][google.longrunning.Operations.Can - celOperation] starts asynchronous cancellation on a long- - running operation. The server makes a best effort to cancel - the operation, but success is not guaranteed. Clients can use - [Operations.GetOperation][google.longrunning.Operations.GetOpe - ration] or other methods to check whether the cancellation - succeeded or whether the operation completed despite - cancellation. On successful cancellation, the operation is not - deleted; instead, it becomes an operation with an - [Operation.error][google.longrunning.Operation.error] value - with a [google.rpc.Status.code][google.rpc.Status.code] of 1, - corresponding to ``Code.CANCELLED``. - optimize_database_operation_name: - If exists, the name of the long-running operation that will be - used to track the post-restore optimization process to - optimize the performance of the restored database, and remove - the dependency on the restore source. 
The name is of the form - ``projects//instances//databases/ - /operations/`` where the is the name of database - being created and restored to. The metadata type of the long- - running operation is [OptimizeRestoredDatabaseMetadata][google - .spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. - This long-running operation will be automatically created by - the system after the RestoreDatabase long-running operation - completes successfully. This operation will not be created if - the restore was not successful. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseMetadata) - }, -) -_sym_db.RegisterMessage(RestoreDatabaseMetadata) - -OptimizeRestoredDatabaseMetadata = _reflection.GeneratedProtocolMessageType( - "OptimizeRestoredDatabaseMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPTIMIZERESTOREDDATABASEMETADATA, - "__module__": "google.cloud.spanner_admin_database_v1.proto.spanner_database_admin_pb2", - "__doc__": """Metadata type for the long-running operation used to track the - progress of optimizations performed on a newly restored database. This - long-running operation is automatically created by the system after - the successful completion of a database restore, and cannot be - cancelled. - - Attributes: - name: - Name of the restored database being optimized. - progress: - The progress of the post-restore optimizations. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) - }, -) -_sym_db.RegisterMessage(OptimizeRestoredDatabaseMetadata) - - -DESCRIPTOR._options = None -_DATABASE.fields_by_name["name"]._options = None -_DATABASE.fields_by_name["state"]._options = None -_DATABASE.fields_by_name["create_time"]._options = None -_DATABASE.fields_by_name["restore_info"]._options = None -_DATABASE._options = None -_LISTDATABASESREQUEST.fields_by_name["parent"]._options = None -_CREATEDATABASEREQUEST.fields_by_name["parent"]._options = None -_CREATEDATABASEREQUEST.fields_by_name["create_statement"]._options = None -_CREATEDATABASEREQUEST.fields_by_name["extra_statements"]._options = None -_CREATEDATABASEMETADATA.fields_by_name["database"]._options = None -_GETDATABASEREQUEST.fields_by_name["name"]._options = None -_UPDATEDATABASEDDLREQUEST.fields_by_name["database"]._options = None -_UPDATEDATABASEDDLREQUEST.fields_by_name["statements"]._options = None -_UPDATEDATABASEDDLMETADATA.fields_by_name["database"]._options = None -_DROPDATABASEREQUEST.fields_by_name["database"]._options = None -_GETDATABASEDDLREQUEST.fields_by_name["database"]._options = None -_LISTDATABASEOPERATIONSREQUEST.fields_by_name["parent"]._options = None -_RESTOREDATABASEREQUEST.fields_by_name["parent"]._options = None -_RESTOREDATABASEREQUEST.fields_by_name["database_id"]._options = None -_RESTOREDATABASEREQUEST.fields_by_name["backup"]._options = None - -_DATABASEADMIN = _descriptor.ServiceDescriptor( - name="DatabaseAdmin", - full_name="google.spanner.admin.database.v1.DatabaseAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\026spanner.googleapis.com\322A\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.admin", - create_key=_descriptor._internal_create_key, - serialized_start=3100, - serialized_end=7087, - methods=[ - _descriptor.MethodDescriptor( - name="ListDatabases", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases", - index=0, - containing_service=None, - input_type=_LISTDATABASESREQUEST, - 
output_type=_LISTDATABASESRESPONSE, - serialized_options=b"\202\323\344\223\002/\022-/v1/{parent=projects/*/instances/*}/databases\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase", - index=1, - containing_service=None, - input_type=_CREATEDATABASEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0022"-/v1/{parent=projects/*/instances/*}/databases:\001*\332A\027parent,create_statement\312Ad\n)google.spanner.admin.database.v1.Database\0227google.spanner.admin.database.v1.CreateDatabaseMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase", - index=2, - containing_service=None, - input_type=_GETDATABASEREQUEST, - output_type=_DATABASE, - serialized_options=b"\202\323\344\223\002/\022-/v1/{name=projects/*/instances/*/databases/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateDatabaseDdl", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl", - index=3, - containing_service=None, - input_type=_UPDATEDATABASEDDLREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\001*\332A\023database,statements\312AS\n\025google.protobuf.Empty\022:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DropDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase", - index=4, - containing_service=None, - input_type=_DROPDATABASEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0023*1/v1/{database=projects/*/instances/*/databases/*}\332A\010database", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetDatabaseDdl", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl", - index=5, - containing_service=None, - input_type=_GETDATABASEDDLREQUEST, - output_type=_GETDATABASEDDLRESPONSE, - serialized_options=b"\202\323\344\223\0027\0225/v1/{database=projects/*/instances/*/databases/*}/ddl\332A\010database", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy", - index=6, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002\206\001">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\001*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\001*ZA"/operations/` and - can be used to track preparation of the database. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Database][google.spanner.admin.database.v1.Database], if successful. 
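The long-running-operation plumbing these deleted servicer stubs document is what `google.api_core.operation.Operation` wraps in v2. A sketch of tracking `CreateDatabase` through it, under placeholder names and default credentials:

```python
# Sketch only; resource names are placeholders.
from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient

client = DatabaseAdminClient()
op = client.create_database(
    parent="projects/my-project/instances/my-instance",
    create_statement="CREATE DATABASE `my-db`",
)
print(op.operation.name)  # the <database>/operations/<operation_id> name
print(op.metadata)        # CreateDatabaseMetadata (may be None until polled)
database = op.result()    # the Database message, once preparation completes
```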
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDatabase(self, request, context): - """Gets the state of a Cloud Spanner database. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateDatabaseDdl(self, request, context): - """Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - [long-running operation][google.longrunning.Operation] will have a name of - the format `/operations/` and can be used to - track execution of the schema change(s). The - [metadata][google.longrunning.Operation.metadata] field type is - [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropDatabase(self, request, context): - """Drops (aka deletes) a Cloud Spanner database. - Completed backups for the database will be retained according to their - `expire_time`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDatabaseDdl(self, request, context): - """Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. This method does not show pending schema updates, those may - be queried using the [Operations][google.longrunning.Operations] API. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a database or backup resource. - Replaces any existing policy. - - Authorization requires `spanner.databases.setIamPolicy` - permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. - For backups, authorization requires `spanner.backups.setIamPolicy` - permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a database or backup resource. - Returns an empty policy if a database or backup exists but does not have a - policy set. - - Authorization requires `spanner.databases.getIamPolicy` permission on - [resource][google.iam.v1.GetIamPolicyRequest.resource]. - For backups, authorization requires `spanner.backups.getIamPolicy` - permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified database or backup - resource. - - Attempting this RPC on a non-existent Cloud Spanner database will - result in a NOT_FOUND error if the user has - `spanner.databases.list` permission on the containing Cloud - Spanner instance. Otherwise returns an empty set of permissions. 
- Calling this method on a backup that does not exist will - result in a NOT_FOUND error if the user has - `spanner.backups.list` permission on the containing instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateBackup(self, request, context): - """Starts creating a new Cloud Spanner Backup. - The returned backup [long-running operation][google.longrunning.Operation] - will have a name of the format - `projects//instances//backups//operations/` - and can be used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - creation and delete the backup. - There can be only one pending backup creation per database. Backup creation - of different databases can run concurrently. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetBackup(self, request, context): - """Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateBackup(self, request, context): - """Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteBackup(self, request, context): - """Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListBackups(self, request, context): - """Lists completed and pending backups. - Backups returned are ordered by `create_time` in descending order, - starting from the most recent `create_time`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def RestoreDatabase(self, request, context): - """Create a new database by restoring from a completed backup. The new - database must be in the same project and in an instance with the same - instance configuration as the instance containing - the backup. The returned database [long-running - operation][google.longrunning.Operation] has a name of the format - `projects//instances//databases//operations/`, - and can be used to track the progress of the operation, and to cancel it. - The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. - The [response][google.longrunning.Operation.response] type - is [Database][google.spanner.admin.database.v1.Database], if - successful. Cancelling the returned operation will stop the restore and - delete the database. - There can be only one database being restored into an instance at a time. 
- Once the restore operation completes, a new restore operation can be - initiated, without waiting for the optimize operation associated with the - first restore to complete. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListDatabaseOperations(self, request, context): - """Lists database [longrunning-operations][google.longrunning.Operation]. - A database operation has a name of the form - `projects//instances//databases//operations/`. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type - `metadata.type_url` describes the type of the metadata. Operations returned - include those that have completed/failed/canceled within the last 7 days, - and pending operations. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListBackupOperations(self, request, context): - """Lists the backup [long-running operations][google.longrunning.Operation] in - the given instance. A backup operation has a name of the form - `projects//instances//backups//operations/`. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type - `metadata.type_url` describes the type of the metadata. Operations returned - include those that have completed/failed/canceled within the last 7 days, - and pending operations. Operations returned are ordered by - `operation.metadata.value.progress.start_time` in descending order starting - from the most recently started operation. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_DatabaseAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListDatabases": grpc.unary_unary_rpc_method_handler( - servicer.ListDatabases, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.SerializeToString, - ), - "CreateDatabase": grpc.unary_unary_rpc_method_handler( - servicer.CreateDatabase, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetDatabase": grpc.unary_unary_rpc_method_handler( - servicer.GetDatabase, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.SerializeToString, - ), - "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler( - servicer.UpdateDatabaseDdl, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DropDatabase": grpc.unary_unary_rpc_method_handler( - servicer.DropDatabase, - 
request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler( - servicer.GetDatabaseDdl, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - "CreateBackup": grpc.unary_unary_rpc_method_handler( - servicer.CreateBackup, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.CreateBackupRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetBackup": grpc.unary_unary_rpc_method_handler( - servicer.GetBackup, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.GetBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.Backup.SerializeToString, - ), - "UpdateBackup": grpc.unary_unary_rpc_method_handler( - servicer.UpdateBackup, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.UpdateBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.Backup.SerializeToString, - ), - "DeleteBackup": grpc.unary_unary_rpc_method_handler( - servicer.DeleteBackup, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.DeleteBackupRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListBackups": grpc.unary_unary_rpc_method_handler( - servicer.ListBackups, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupsResponse.SerializeToString, - ), - "RestoreDatabase": grpc.unary_unary_rpc_method_handler( - servicer.RestoreDatabase, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListDatabaseOperations": grpc.unary_unary_rpc_method_handler( - servicer.ListDatabaseOperations, - 
request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.SerializeToString, - ), - "ListBackupOperations": grpc.unary_unary_rpc_method_handler( - servicer.ListBackupOperations, - request_deserializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class DatabaseAdmin(object): - """Cloud Spanner Database Admin API - - The Cloud Spanner Database Admin API can be used to create, drop, and - list databases. It also enables updating the schema of pre-existing - databases. It can be also used to create, delete and list backups for a - database and to restore from an existing backup. - """ - - @staticmethod - def ListDatabases( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateDatabase( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetDatabase( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateDatabaseDdl( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - 
timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DropDatabase( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetDatabaseDdl( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - 
options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.CreateBackupRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.GetBackupRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.UpdateBackupRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.DeleteBackupRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListBackups( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupsRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def RestoreDatabase( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - 
metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListDatabaseOperations( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListBackupOperations( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/spanner_admin_database_v1/py.typed b/google/cloud/spanner_admin_database_v1/py.typed new file mode 100644 index 0000000000..29f334aad6 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-spanner-admin-database package uses inline types. diff --git a/google/cloud/spanner_admin_database_v1/services/__init__.py b/google/cloud/spanner_admin_database_v1/services/__init__.py new file mode 100644 index 0000000000..42ffdf2bc4 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py new file mode 100644 index 0000000000..1fd198c176 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import DatabaseAdminClient +from .async_client import DatabaseAdminAsyncClient + +__all__ = ( + "DatabaseAdminClient", + "DatabaseAdminAsyncClient", +) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py new file mode 100644 index 0000000000..4f15f2e2c8 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -0,0 +1,1925 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.spanner_admin_database_v1.services.database_admin import pagers +from google.cloud.spanner_admin_database_v1.types import backup +from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup +from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport +from .client import DatabaseAdminClient + + +class DatabaseAdminAsyncClient: + """Cloud Spanner Database Admin API + The Cloud Spanner Database Admin API can be used to create, + drop, and list databases. It also enables updating the schema of + pre-existing databases. It can be also used to create, delete + and list backups for a database and to restore from an existing + backup. 
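The new `DatabaseAdminAsyncClient` introduced here mirrors the sync surface method-for-method. A sketch of the async pager, with a placeholder parent and assuming default credentials in the environment:

```python
# Sketch only; parent is a placeholder.
import asyncio

from google.cloud.spanner_admin_database_v1.services.database_admin import (
    DatabaseAdminAsyncClient,
)


async def main():
    client = DatabaseAdminAsyncClient()
    pager = await client.list_databases(
        parent="projects/my-project/instances/my-instance"
    )
    async for database in pager:  # additional pages resolve on demand
        print(database.name)


asyncio.run(main())
```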
+ """ + + _client: DatabaseAdminClient + + DEFAULT_ENDPOINT = DatabaseAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT + + backup_path = staticmethod(DatabaseAdminClient.backup_path) + parse_backup_path = staticmethod(DatabaseAdminClient.parse_backup_path) + database_path = staticmethod(DatabaseAdminClient.database_path) + parse_database_path = staticmethod(DatabaseAdminClient.parse_database_path) + instance_path = staticmethod(DatabaseAdminClient.instance_path) + parse_instance_path = staticmethod(DatabaseAdminClient.parse_instance_path) + + common_billing_account_path = staticmethod( + DatabaseAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + DatabaseAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(DatabaseAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + DatabaseAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + DatabaseAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + DatabaseAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(DatabaseAdminClient.common_project_path) + parse_common_project_path = staticmethod( + DatabaseAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod(DatabaseAdminClient.common_location_path) + parse_common_location_path = staticmethod( + DatabaseAdminClient.parse_common_location_path + ) + + from_service_account_file = DatabaseAdminClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DatabaseAdminTransport: + """Return the transport used by the client instance. + + Returns: + DatabaseAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(DatabaseAdminClient).get_transport_class, type(DatabaseAdminClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, DatabaseAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the database admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DatabaseAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = DatabaseAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_databases( + self, + request: spanner_database_admin.ListDatabasesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatabasesAsyncPager: + r"""Lists Cloud Spanner databases. + + Args: + request (:class:`~.spanner_database_admin.ListDatabasesRequest`): + The request object. The request for + [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. + parent (:class:`str`): + Required. The instance whose databases should be listed. + Values are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatabasesAsyncPager: + The response for + [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.ListDatabasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_databases, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatabasesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
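+        # A minimal caller-side sketch (illustrative only; the project and
+        # instance names are placeholders, not part of this library):
+        #
+        #   client = DatabaseAdminAsyncClient()
+        #   pager = await client.list_databases(
+        #       parent="projects/my-project/instances/my-instance"
+        #   )
+        #   async for database in pager:
+        #       print(database.name)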
+ return response + + async def create_database( + self, + request: spanner_database_admin.CreateDatabaseRequest = None, + *, + parent: str = None, + create_statement: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Cloud Spanner database and starts to prepare it + for serving. The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track preparation of the database. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Database][google.spanner.admin.database.v1.Database], if + successful. + + Args: + request (:class:`~.spanner_database_admin.CreateDatabaseRequest`): + The request object. The request for + [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. + parent (:class:`str`): + Required. The name of the instance that will serve the + new database. Values are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + create_statement (:class:`str`): + Required. A ``CREATE DATABASE`` statement, which + specifies the ID of the new database. The database ID + must conform to the regular expression + ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 + characters in length. If the database ID is a reserved + word or if it contains a hyphen, the database ID must be + enclosed in backticks (:literal:`\``). + This corresponds to the ``create_statement`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.spanner_database_admin.Database``: A Cloud + Spanner database. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, create_statement]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.CreateDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if create_statement is not None: + request.create_statement = create_statement + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_database, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
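+        # For example (illustrative): when request.parent is
+        # "projects/p/instances/i", to_grpc_metadata yields the header
+        # ("x-goog-request-params", "parent=projects%2Fp%2Finstances%2Fi"),
+        # which the service uses to route the request.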
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + spanner_database_admin.Database, + metadata_type=spanner_database_admin.CreateDatabaseMetadata, + ) + + # Done; return the response. + return response + + async def get_database( + self, + request: spanner_database_admin.GetDatabaseRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner_database_admin.Database: + r"""Gets the state of a Cloud Spanner database. + + Args: + request (:class:`~.spanner_database_admin.GetDatabaseRequest`): + The request object. The request for + [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. + name (:class:`str`): + Required. The name of the requested database. Values are + of the form + ``projects//instances//databases/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner_database_admin.Database: + A Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.GetDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_database, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_database_ddl( + self, + request: spanner_database_admin.UpdateDatabaseDdlRequest = None, + *, + database: str = None, + statements: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. 
The + returned [long-running operation][google.longrunning.Operation] + will have a name of the format + ``/operations/`` and can be used to + track execution of the schema change(s). The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + + Args: + request (:class:`~.spanner_database_admin.UpdateDatabaseDdlRequest`): + The request object. Enqueues the given DDL statements to + be applied, in order but not necessarily all at once, to + the database schema at some point (or points) in the + future. The server checks that the statements are + executable (syntactically valid, name tables that exist, + etc.) before enqueueing them, but they may still fail + upon + later execution (e.g., if a statement from another batch + of statements is applied first and it conflicts in some + way, or if there is some data-related problem like a + `NULL` value in a column to which `NOT NULL` would be + added). If a statement fails, all subsequent statements + in the batch are automatically cancelled. + Each batch of statements is assigned a name which can be + used with the + [Operations][google.longrunning.Operations] API to + monitor progress. See the + [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] + field for more details. + database (:class:`str`): + Required. The database to update. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + statements (:class:`Sequence[str]`): + Required. DDL statements to be + applied to the database. + This corresponds to the ``statements`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, statements]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.UpdateDatabaseDdlRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + if statements: + request.statements.extend(statements) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
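+        # Restated for clarity: the sleep intervals of the default retry
+        # below grow geometrically from 1.0s by a factor of 1.3, capped at
+        # 32s; only DEADLINE_EXCEEDED and UNAVAILABLE errors are retried,
+        # and an explicit ``retry`` argument from the caller overrides
+        # these defaults.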
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_database_ddl, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata, + ) + + # Done; return the response. + return response + + async def drop_database( + self, + request: spanner_database_admin.DropDatabaseRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Drops (aka deletes) a Cloud Spanner database. Completed backups + for the database will be retained according to their + ``expire_time``. + + Args: + request (:class:`~.spanner_database_admin.DropDatabaseRequest`): + The request object. The request for + [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. + database (:class:`str`): + Required. The database to be dropped. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.DropDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.drop_database, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. 
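+        # The RPC has no response body; awaiting it only surfaces errors.
+        # Caller-side sketch (illustrative; the resource name is a
+        # placeholder):
+        #
+        #   await client.drop_database(
+        #       database="projects/p/instances/i/databases/d"
+        #   )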
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_database_ddl( + self, + request: spanner_database_admin.GetDatabaseDdlRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner_database_admin.GetDatabaseDdlResponse: + r"""Returns the schema of a Cloud Spanner database as a list of + formatted DDL statements. This method does not show pending + schema updates, those may be queried using the + [Operations][google.longrunning.Operations] API. + + Args: + request (:class:`~.spanner_database_admin.GetDatabaseDdlRequest`): + The request object. The request for + [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. + database (:class:`str`): + Required. The database whose schema + we wish to get. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner_database_admin.GetDatabaseDdlResponse: + The response for + [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.GetDatabaseDdlRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_database_ddl, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on a database or backup resource. + Replaces any existing policy. + + Authorization requires ``spanner.databases.setIamPolicy`` + permission on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. For + backups, authorization requires ``spanner.backups.setIamPolicy`` + permission on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. 
+ + Args: + request (:class:`~.iam_policy.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
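+        # Caller-side sketch (illustrative; role, member, and resource are
+        # placeholders). The IAM types are plain protobuf messages:
+        #
+        #   from google.iam.v1 import iam_policy_pb2, policy_pb2
+        #
+        #   request = iam_policy_pb2.SetIamPolicyRequest(
+        #       resource="projects/p/instances/i/databases/d",
+        #       policy=policy_pb2.Policy(
+        #           bindings=[
+        #               policy_pb2.Binding(
+        #                   role="roles/spanner.databaseReader",
+        #                   members=["user:alice@example.com"],
+        #               )
+        #           ]
+        #       ),
+        #   )
+        #   new_policy = await client.set_iam_policy(request=request)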
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for a database or backup + resource. Returns an empty policy if a database or backup exists + but does not have a policy set. + + Authorization requires ``spanner.databases.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. For + backups, authorization requires ``spanner.backups.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Args: + request (:class:`~.iam_policy.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. 
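+        # Note: unlike the Spanner admin messages above, the IAM request
+        # types are plain protobuf messages rather than proto-plus wrappers,
+        # so a dict request is expanded via keyword arguments below.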
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the specified + database or backup resource. + + Attempting this RPC on a non-existent Cloud Spanner database + will result in a NOT_FOUND error if the user has + ``spanner.databases.list`` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will result + in a NOT_FOUND error if the user has ``spanner.backups.list`` + permission on the containing instance. + + Args: + request (:class:`~.iam_policy.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. 
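+        # Caller-side sketch (illustrative; the permission string is just
+        # an example value):
+        #
+        #   response = await client.test_iam_permissions(
+        #       resource="projects/p/instances/i/databases/d",
+        #       permissions=["spanner.databases.read"],
+        #   )
+        #   granted = set(response.permissions)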
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_backup( + self, + request: gsad_backup.CreateBackupRequest = None, + *, + parent: str = None, + backup: gsad_backup.Backup = None, + backup_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts creating a new Cloud Spanner Backup. The returned backup + [long-running operation][google.longrunning.Operation] will have + a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.spanner.admin.database.v1.Backup], if + successful. Cancelling the returned operation will stop the + creation and delete the backup. There can be only one pending + backup creation per database. Backup creation of different + databases can run concurrently. + + Args: + request (:class:`~.gsad_backup.CreateBackupRequest`): + The request object. The request for + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. + parent (:class:`str`): + Required. The name of the instance in which the backup + will be created. This must be the same instance that + contains the database the backup will be created from. + The backup will be stored in the location(s) specified + in the instance configuration of this instance. Values + are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (:class:`~.gsad_backup.Backup`): + Required. The backup to create. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the backup to be created. The + ``backup_id`` appended to ``parent`` forms the full + backup name of the form + ``projects//instances//backups/``. 
+ This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gsad_backup.Backup``: A backup of a Cloud + Spanner database. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup, backup_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = gsad_backup.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if backup is not None: + request.backup = backup + if backup_id is not None: + request.backup_id = backup_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_backup, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gsad_backup.Backup, + metadata_type=gsad_backup.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + async def get_backup( + self, + request: backup.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> backup.Backup: + r"""Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Args: + request (:class:`~.backup.GetBackupRequest`): + The request object. The request for + [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects//instances//backups/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.backup.Backup: + A backup of a Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = backup.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_backup( + self, + request: gsad_backup.UpdateBackupRequest = None, + *, + backup: gsad_backup.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gsad_backup.Backup: + r"""Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Args: + request (:class:`~.gsad_backup.UpdateBackupRequest`): + The request object. The request for + [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. + backup (:class:`~.gsad_backup.Backup`): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gsad_backup.Backup: + A backup of a Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = gsad_backup.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_backup( + self, + request: backup.DeleteBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Args: + request (:class:`~.backup.DeleteBackupRequest`): + The request object. The request for + [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. + name (:class:`str`): + Required. Name of the backup to delete. Values are of + the form + ``projects//instances//backups/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = backup.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
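+        # Nothing is returned on success; errors (for example, NOT_FOUND
+        # for a backup that does not exist) surface as exceptions here.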
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_backups( + self, + request: backup.ListBackupsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsAsyncPager: + r"""Lists completed and pending backups. Backups returned are + ordered by ``create_time`` in descending order, starting from + the most recent ``create_time``. + + Args: + request (:class:`~.backup.ListBackupsRequest`): + The request object. The request for + [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. + parent (:class:`str`): + Required. The instance to list backups from. Values are + of the form ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListBackupsAsyncPager: + The response for + [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = backup.ListBackupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBackupsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def restore_database( + self, + request: spanner_database_admin.RestoreDatabaseRequest = None, + *, + parent: str = None, + database_id: str = None, + backup: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create a new database by restoring from a completed backup. 
The + new database must be in the same project and in an instance with + the same instance configuration as the instance containing the + backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the + format + ``projects//instances//databases//operations/``, + and can be used to track the progress of the operation, and to + cancel it. The [metadata][google.longrunning.Operation.metadata] + field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the + restore and delete the database. There can be only one database + being restored into an instance at a time. Once the restore + operation completes, a new restore operation can be initiated, + without waiting for the optimize operation associated with the + first restore to complete. + + Args: + request (:class:`~.spanner_database_admin.RestoreDatabaseRequest`): + The request object. The request for + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. + parent (:class:`str`): + Required. The name of the instance in which to create + the restored database. This instance must be in the same + project and have the same instance configuration as the + instance containing the source backup. Values are of the + form ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + database_id (:class:`str`): + Required. The id of the database to create and restore + to. This database must not already exist. The + ``database_id`` appended to ``parent`` forms the full + database name of the form + ``projects//instances//databases/``. + This corresponds to the ``database_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (:class:`str`): + Name of the backup from which to restore. Values are of + the form + ``projects//instances//backups/``. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.spanner_database_admin.Database``: A Cloud + Spanner database. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, database_id, backup]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.RestoreDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
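+        # Caller-side sketch of the flattened form (names are placeholders):
+        #
+        #   op = await client.restore_database(
+        #       parent="projects/p/instances/i",
+        #       database_id="restored-db",
+        #       backup="projects/p/instances/i/backups/b",
+        #   )
+        #   database = await op.result()  # waits for the restore to finish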
+ + if parent is not None: + request.parent = parent + if database_id is not None: + request.database_id = database_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.restore_database, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + spanner_database_admin.Database, + metadata_type=spanner_database_admin.RestoreDatabaseMetadata, + ) + + # Done; return the response. + return response + + async def list_database_operations( + self, + request: spanner_database_admin.ListDatabaseOperationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatabaseOperationsAsyncPager: + r"""Lists database + [longrunning-operations][google.longrunning.Operation]. A + database operation has a name of the form + ``projects//instances//databases//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. + + Args: + request (:class:`~.spanner_database_admin.ListDatabaseOperationsRequest`): + The request object. The request for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + parent (:class:`str`): + Required. The instance of the database operations. + Values are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatabaseOperationsAsyncPager: + The response for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner_database_admin.ListDatabaseOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
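+        # Illustrative: a full request proto can narrow results via the
+        # API's ``filter`` field, e.g. to restore operations only:
+        #
+        #   request = spanner_database_admin.ListDatabaseOperationsRequest(
+        #       parent="projects/p/instances/i",
+        #       filter="(metadata.@type:type.googleapis.com/"
+        #       "google.spanner.admin.database.v1.RestoreDatabaseMetadata)",
+        #   )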
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_database_operations, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatabaseOperationsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_backup_operations( + self, + request: backup.ListBackupOperationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupOperationsAsyncPager: + r"""Lists the backup [long-running + operations][google.longrunning.Operation] in the given instance. + A backup operation has a name of the form + ``projects//instances//backups//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending + order starting from the most recently started operation. + + Args: + request (:class:`~.backup.ListBackupOperationsRequest`): + The request object. The request for + [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. + parent (:class:`str`): + Required. The instance of the backup operations. Values + are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListBackupOperationsAsyncPager: + The response for + [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = backup.ListBackupOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
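+        # As with the other paged methods, the wrapped call below fetches a
+        # single page; the AsyncPager built from its response re-invokes it
+        # under ``async for`` to resolve subsequent pages transparently.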
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_backup_operations, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBackupOperationsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-spanner-admin-database", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("DatabaseAdminAsyncClient",) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py new file mode 100644 index 0000000000..3edfd9c9ed --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -0,0 +1,2047 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
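For orientation before the synchronous client below, a minimal usage sketch of the async surface generated above; the project and instance names are hypothetical, and credentials are assumed to come from the environment (Application Default Credentials)::

    import asyncio

    from google.cloud.spanner_admin_database_v1.services.database_admin import (
        DatabaseAdminAsyncClient,
    )

    async def main():
        client = DatabaseAdminAsyncClient()
        # The async pager resolves additional pages transparently.
        pager = await client.list_database_operations(
            parent="projects/my-project/instances/my-instance"  # hypothetical
        )
        async for op in pager:
            print(op.name)

    asyncio.run(main())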
+#

+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import DatabaseAdminGrpcTransport
+from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+
+
+class DatabaseAdminClientMeta(type):
+    """Metaclass for the DatabaseAdmin client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[DatabaseAdminTransport]]
+    _transport_registry["grpc"] = DatabaseAdminGrpcTransport
+    _transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[DatabaseAdminTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class DatabaseAdminClient(metaclass=DatabaseAdminClientMeta):
+    """Cloud Spanner Database Admin API
+    The Cloud Spanner Database Admin API can be used to create,
+    drop, and list databases. It also enables updating the schema of
+    pre-existing databases. It can also be used to create, delete,
+    and list backups for a database and to restore from an existing
+    backup.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "spanner.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> DatabaseAdminTransport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            DatabaseAdminTransport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def backup_path(project: str, instance: str, backup: str,) -> str:
+        """Return a fully-qualified backup string."""
+        return "projects/{project}/instances/{instance}/backups/{backup}".format(
+            project=project, instance=instance, backup=backup,
+        )
+
+    @staticmethod
+    def parse_backup_path(path: str) -> Dict[str, str]:
+        """Parse a backup path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/backups/(?P<backup>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def database_path(project: str, instance: str, database: str,) -> str:
+        """Return a fully-qualified database string."""
+        return "projects/{project}/instances/{instance}/databases/{database}".format(
+            project=project, instance=instance, database=database,
+        )
+
+    @staticmethod
+    def parse_database_path(path: str) -> Dict[str, str]:
+        """Parse a database path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def instance_path(project: str, instance: str,) -> str:
+        """Return a fully-qualified instance string."""
+        return "projects/{project}/instances/{instance}".format(
+            project=project, instance=instance,
+        )
+
+    @staticmethod
+    def parse_instance_path(path: str) -> Dict[str, str]:
+        """Parse an instance path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, DatabaseAdminTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the database admin client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.DatabaseAdminTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (client_options_lib.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DatabaseAdminTransport): + # transport is a DatabaseAdminTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_databases( + self, + request: spanner_database_admin.ListDatabasesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatabasesPager: + r"""Lists Cloud Spanner databases. + + Args: + request (:class:`~.spanner_database_admin.ListDatabasesRequest`): + The request object. The request for + [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. + parent (:class:`str`): + Required. The instance whose databases should be listed. + Values are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
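A hedged construction sketch tying together the credential, endpoint, and transport logic of ``__init__`` above; the explicit endpoint shown merely restates the default and is illustrative only::

    from google.api_core.client_options import ClientOptions

    from google.cloud.spanner_admin_database_v1.services.database_admin import (
        DatabaseAdminClient,
    )

    # With no arguments, credentials come from the environment and the
    # endpoint follows the GOOGLE_API_USE_MTLS_ENDPOINT logic above.
    client = DatabaseAdminClient(
        client_options=ClientOptions(api_endpoint="spanner.googleapis.com")
    )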
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListDatabasesPager: + The response for + [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.ListDatabasesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.ListDatabasesRequest): + request = spanner_database_admin.ListDatabasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_databases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatabasesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def create_database( + self, + request: spanner_database_admin.CreateDatabaseRequest = None, + *, + parent: str = None, + create_statement: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new Cloud Spanner database and starts to prepare it + for serving. The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track preparation of the database. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Database][google.spanner.admin.database.v1.Database], if + successful. + + Args: + request (:class:`~.spanner_database_admin.CreateDatabaseRequest`): + The request object. The request for + [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. + parent (:class:`str`): + Required. The name of the instance that will serve the + new database. Values are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + create_statement (:class:`str`): + Required. 
A ``CREATE DATABASE`` statement, which + specifies the ID of the new database. The database ID + must conform to the regular expression + ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 + characters in length. If the database ID is a reserved + word or if it contains a hyphen, the database ID must be + enclosed in backticks (:literal:`\``). + This corresponds to the ``create_statement`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.spanner_database_admin.Database``: A Cloud + Spanner database. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, create_statement]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.CreateDatabaseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.CreateDatabaseRequest): + request = spanner_database_admin.CreateDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if create_statement is not None: + request.create_statement = create_statement + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_database] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + spanner_database_admin.Database, + metadata_type=spanner_database_admin.CreateDatabaseMetadata, + ) + + # Done; return the response. + return response + + def get_database( + self, + request: spanner_database_admin.GetDatabaseRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner_database_admin.Database: + r"""Gets the state of a Cloud Spanner database. + + Args: + request (:class:`~.spanner_database_admin.GetDatabaseRequest`): + The request object. The request for + [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. + name (:class:`str`): + Required. The name of the requested database. Values are + of the form + ``projects//instances//databases/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner_database_admin.Database: + A Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.GetDatabaseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.GetDatabaseRequest): + request = spanner_database_admin.GetDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_database] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_database_ddl( + self, + request: spanner_database_admin.UpdateDatabaseDdlRequest = None, + *, + database: str = None, + statements: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. The + returned [long-running operation][google.longrunning.Operation] + will have a name of the format + ``/operations/`` and can be used to + track execution of the schema change(s). The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + + Args: + request (:class:`~.spanner_database_admin.UpdateDatabaseDdlRequest`): + The request object. Enqueues the given DDL statements to + be applied, in order but not necessarily all at once, to + the database schema at some point (or points) in the + future. The server checks that the statements are + executable (syntactically valid, name tables that exist, + etc.) before enqueueing them, but they may still fail + upon + later execution (e.g., if a statement from another batch + of statements is applied first and it conflicts in some + way, or if there is some data-related problem like a + `NULL` value in a column to which `NOT NULL` would be + added). If a statement fails, all subsequent statements + in the batch are automatically cancelled. + Each batch of statements is assigned a name which can be + used with the + [Operations][google.longrunning.Operations] API to + monitor progress. 
See the + [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] + field for more details. + database (:class:`str`): + Required. The database to update. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + statements (:class:`Sequence[str]`): + Required. DDL statements to be + applied to the database. + This corresponds to the ``statements`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, statements]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.UpdateDatabaseDdlRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest): + request = spanner_database_admin.UpdateDatabaseDdlRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + if statements: + request.statements.extend(statements) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_database_ddl] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata, + ) + + # Done; return the response. + return response + + def drop_database( + self, + request: spanner_database_admin.DropDatabaseRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Drops (aka deletes) a Cloud Spanner database. Completed backups + for the database will be retained according to their + ``expire_time``. 
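Stepping back to the schema-update surface completed above, a sketch of enqueueing a DDL batch and blocking until it is applied; the resource name and DDL are hypothetical, and ``client`` is a ``DatabaseAdminClient`` constructed as in the earlier sketch::

    op = client.update_database_ddl(
        database="projects/my-project/instances/my-instance/databases/my-db",
        statements=[
            "CREATE TABLE Singers (SingerId INT64) PRIMARY KEY (SingerId)",
        ],
    )
    # Blocks until the batch is applied; raises on failure. The operation
    # itself has no response payload (google.protobuf.Empty).
    op.result()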
+ + Args: + request (:class:`~.spanner_database_admin.DropDatabaseRequest`): + The request object. The request for + [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. + database (:class:`str`): + Required. The database to be dropped. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.DropDatabaseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.DropDatabaseRequest): + request = spanner_database_admin.DropDatabaseRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.drop_database] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def get_database_ddl( + self, + request: spanner_database_admin.GetDatabaseDdlRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner_database_admin.GetDatabaseDdlResponse: + r"""Returns the schema of a Cloud Spanner database as a list of + formatted DDL statements. This method does not show pending + schema updates, those may be queried using the + [Operations][google.longrunning.Operations] API. + + Args: + request (:class:`~.spanner_database_admin.GetDatabaseDdlRequest`): + The request object. The request for + [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. + database (:class:`str`): + Required. The database whose schema + we wish to get. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner_database_admin.GetDatabaseDdlResponse: + The response for + [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_database_admin.GetDatabaseDdlRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest): + request = spanner_database_admin.GetDatabaseDdlRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_database_ddl] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on a database or backup resource. + Replaces any existing policy. + + Authorization requires ``spanner.databases.setIamPolicy`` + permission on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. For + backups, authorization requires ``spanner.backups.setIamPolicy`` + permission on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. + + Args: + request (:class:`~.iam_policy.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for a database or backup + resource. Returns an empty policy if a database or backup exists + but does not have a policy set. + + Authorization requires ``spanner.databases.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. For + backups, authorization requires ``spanner.backups.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Args: + request (:class:`~.iam_policy.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
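Because the IAM request types are raw protobuf messages rather than proto-plus wrappers, a ``dict`` request is expanded via keyword arguments, as the ``isinstance(request, dict)`` branch above shows. A sketch under that assumption (resource path and binding are hypothetical)::

    from google.cloud.spanner_admin_database_v1.services.database_admin import (
        DatabaseAdminClient,
    )

    client = DatabaseAdminClient()
    # Nested dicts are accepted for protobuf message fields.
    policy = client.set_iam_policy(
        request={
            "resource": "projects/my-project/instances/my-instance/databases/my-db",
            "policy": {
                "bindings": [
                    {
                        "role": "roles/spanner.databaseReader",
                        "members": ["user:eve@example.com"],
                    }
                ]
            },
        }
    )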
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the specified + database or backup resource. + + Attempting this RPC on a non-existent Cloud Spanner database + will result in a NOT_FOUND error if the user has + ``spanner.databases.list`` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will result + in a NOT_FOUND error if the user has ``spanner.backups.list`` + permission on the containing instance. + + Args: + request (:class:`~.iam_policy.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def create_backup( + self, + request: gsad_backup.CreateBackupRequest = None, + *, + parent: str = None, + backup: gsad_backup.Backup = None, + backup_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Starts creating a new Cloud Spanner Backup. The returned backup + [long-running operation][google.longrunning.Operation] will have + a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.spanner.admin.database.v1.Backup], if + successful. Cancelling the returned operation will stop the + creation and delete the backup. There can be only one pending + backup creation per database. Backup creation of different + databases can run concurrently. + + Args: + request (:class:`~.gsad_backup.CreateBackupRequest`): + The request object. The request for + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. + parent (:class:`str`): + Required. The name of the instance in which the backup + will be created. This must be the same instance that + contains the database the backup will be created from. + The backup will be stored in the location(s) specified + in the instance configuration of this instance. Values + are of the form + ``projects//instances/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (:class:`~.gsad_backup.Backup`): + Required. The backup to create. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the backup to be created. The + ``backup_id`` appended to ``parent`` forms the full + backup name of the form + ``projects//instances//backups/``. + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.gsad_backup.Backup``: A backup of a Cloud + Spanner database. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup, backup_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a gsad_backup.CreateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, gsad_backup.CreateBackupRequest): + request = gsad_backup.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if backup is not None: + request.backup = backup + if backup_id is not None: + request.backup_id = backup_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gsad_backup.Backup, + metadata_type=gsad_backup.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + def get_backup( + self, + request: backup.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> backup.Backup: + r"""Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Args: + request (:class:`~.backup.GetBackupRequest`): + The request object. The request for + [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects//instances//backups/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.backup.Backup: + A backup of a Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a backup.GetBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, backup.GetBackupRequest): + request = backup.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def update_backup( + self, + request: gsad_backup.UpdateBackupRequest = None, + *, + backup: gsad_backup.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gsad_backup.Backup: + r"""Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Args: + request (:class:`~.gsad_backup.UpdateBackupRequest`): + The request object. The request for + [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. + backup (:class:`~.gsad_backup.Backup`): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gsad_backup.Backup: + A backup of a Cloud Spanner database. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a gsad_backup.UpdateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, gsad_backup.UpdateBackupRequest): + request = gsad_backup.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+        return response
+
+    def delete_backup(
+        self,
+        request: backup.DeleteBackupRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Deletes a pending or completed
+        [Backup][google.spanner.admin.database.v1.Backup].
+
+        Args:
+            request (:class:`~.backup.DeleteBackupRequest`):
+                The request object. The request for
+                [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
+            name (:class:`str`):
+                Required. Name of the backup to delete. Values are of
+                the form
+                ``projects/<project>/instances/<instance>/backups/<backup>``.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a backup.DeleteBackupRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, backup.DeleteBackupRequest):
+            request = backup.DeleteBackupRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_backup]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    def list_backups(
+        self,
+        request: backup.ListBackupsRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListBackupsPager:
+        r"""Lists completed and pending backups. Backups returned are
+        ordered by ``create_time`` in descending order, starting from
+        the most recent ``create_time``.
+
+        Args:
+            request (:class:`~.backup.ListBackupsRequest`):
+                The request object. The request for
+                [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+            parent (:class:`str`):
+                Required. The instance to list backups from. Values are
+                of the form ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListBackupsPager:
+                The response for
+                [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a backup.ListBackupsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, backup.ListBackupsRequest):
+            request = backup.ListBackupsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_backups]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBackupsPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def restore_database(
+        self,
+        request: spanner_database_admin.RestoreDatabaseRequest = None,
+        *,
+        parent: str = None,
+        database_id: str = None,
+        backup: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Create a new database by restoring from a completed backup. The
+        new database must be in the same project and in an instance with
+        the same instance configuration as the instance containing the
+        backup. The returned database [long-running
+        operation][google.longrunning.Operation] has a name of the
+        format
+        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``,
+        and can be used to track the progress of the operation, and to
+        cancel it. The [metadata][google.longrunning.Operation.metadata]
+        field type is
+        [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+        The [response][google.longrunning.Operation.response] type is
+        [Database][google.spanner.admin.database.v1.Database], if
+        successful. Cancelling the returned operation will stop the
+        restore and delete the database. There can be only one database
+        being restored into an instance at a time. Once the restore
+        operation completes, a new restore operation can be initiated,
+        without waiting for the optimize operation associated with the
+        first restore to complete.
+
+        Args:
+            request (:class:`~.spanner_database_admin.RestoreDatabaseRequest`):
+                The request object. The request for
+                [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+            parent (:class:`str`):
+                Required. The name of the instance in which to create
+                the restored database.
This instance must be in the same
+                project and have the same instance configuration as the
+                instance containing the source backup. Values are of the
+                form ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            database_id (:class:`str`):
+                Required. The id of the database to create and restore
+                to. This database must not already exist. The
+                ``database_id`` appended to ``parent`` forms the full
+                database name of the form
+                ``projects/<project>/instances/<instance>/databases/<database_id>``.
+                This corresponds to the ``database_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup (:class:`str`):
+                Name of the backup from which to restore. Values are of
+                the form
+                ``projects/<project>/instances/<instance>/backups/<backup>``.
+                This corresponds to the ``backup`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.spanner_database_admin.Database`: A Cloud
+                Spanner database.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, database_id, backup])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a spanner_database_admin.RestoreDatabaseRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
+            request = spanner_database_admin.RestoreDatabaseRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if database_id is not None:
+            request.database_id = database_id
+        if backup is not None:
+            request.backup = backup
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.restore_database]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            spanner_database_admin.Database,
+            metadata_type=spanner_database_admin.RestoreDatabaseMetadata,
+        )
+
+        # Done; return the response.
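+        # (Sketch of blocking on the returned future; resource names are
+        # placeholders:
+        #
+        #     op = client.restore_database(
+        #         parent="projects/<project>/instances/<instance>",
+        #         database_id="<database_id>",
+        #         backup="projects/<project>/instances/<instance>/backups/<backup>",
+        #     )
+        #     database = op.result()  # waits for the restore; raises on failure
+        # )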
+        return response
+
+    def list_database_operations(
+        self,
+        request: spanner_database_admin.ListDatabaseOperationsRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListDatabaseOperationsPager:
+        r"""Lists database
+        [longrunning-operations][google.longrunning.Operation]. A
+        database operation has a name of the form
+        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation>``.
+        The long-running operation
+        [metadata][google.longrunning.Operation.metadata] field type
+        ``metadata.type_url`` describes the type of the metadata.
+        Operations returned include those that have
+        completed/failed/canceled within the last 7 days, and pending
+        operations.
+
+        Args:
+            request (:class:`~.spanner_database_admin.ListDatabaseOperationsRequest`):
+                The request object. The request for
+                [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+            parent (:class:`str`):
+                Required. The instance of the database operations.
+                Values are of the form
+                ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListDatabaseOperationsPager:
+                The response for
+                [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a spanner_database_admin.ListDatabaseOperationsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(
+            request, spanner_database_admin.ListDatabaseOperationsRequest
+        ):
+            request = spanner_database_admin.ListDatabaseOperationsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_database_operations]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListDatabaseOperationsPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
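+        # (The pager is directly iterable and fetches further pages lazily;
+        # sketch, with ``instance_name`` as a placeholder:
+        #
+        #     for op in client.list_database_operations(parent=instance_name):
+        #         print(op.name, op.done)
+        # )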
+        return response
+
+    def list_backup_operations(
+        self,
+        request: backup.ListBackupOperationsRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListBackupOperationsPager:
+        r"""Lists the backup [long-running
+        operations][google.longrunning.Operation] in the given instance.
+        A backup operation has a name of the form
+        ``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>``.
+        The long-running operation
+        [metadata][google.longrunning.Operation.metadata] field type
+        ``metadata.type_url`` describes the type of the metadata.
+        Operations returned include those that have
+        completed/failed/canceled within the last 7 days, and pending
+        operations. Operations returned are ordered by
+        ``operation.metadata.value.progress.start_time`` in descending
+        order starting from the most recently started operation.
+
+        Args:
+            request (:class:`~.backup.ListBackupOperationsRequest`):
+                The request object. The request for
+                [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+            parent (:class:`str`):
+                Required. The instance of the backup operations. Values
+                are of the form
+                ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListBackupOperationsPager:
+                The response for
+                [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a backup.ListBackupOperationsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, backup.ListBackupOperationsRequest):
+            request = backup.ListBackupOperationsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_backup_operations]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBackupOperationsPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
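+        # (Same iteration pattern as list_database_operations; sketch:
+        #
+        #     for op in client.list_backup_operations(parent=instance_name):
+        #         print(op.name)
+        # )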
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-spanner-admin-database", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("DatabaseAdminClient",) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py new file mode 100644 index 0000000000..ee2a12f33e --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py @@ -0,0 +1,540 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.spanner_admin_database_v1.types import backup +from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.longrunning import operations_pb2 as operations # type: ignore + + +class ListDatabasesPager: + """A pager for iterating through ``list_databases`` requests. + + This class thinly wraps an initial + :class:`~.spanner_database_admin.ListDatabasesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``databases`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatabases`` requests and continue to iterate + through the ``databases`` field on the + corresponding responses. + + All the usual :class:`~.spanner_database_admin.ListDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., spanner_database_admin.ListDatabasesResponse], + request: spanner_database_admin.ListDatabasesRequest, + response: spanner_database_admin.ListDatabasesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_database_admin.ListDatabasesRequest`): + The initial request object. + response (:class:`~.spanner_database_admin.ListDatabasesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
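+
+        Example (an illustrative sketch; assumes ``client`` is a constructed
+        ``DatabaseAdminClient``):
+
+            pager = client.list_databases(parent="projects/<project>/instances/<instance>")
+            for database in pager:  # further pages are fetched transparently
+                print(database.name)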
+ """ + self._method = method + self._request = spanner_database_admin.ListDatabasesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[spanner_database_admin.ListDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[spanner_database_admin.Database]: + for page in self.pages: + yield from page.databases + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabasesAsyncPager: + """A pager for iterating through ``list_databases`` requests. + + This class thinly wraps an initial + :class:`~.spanner_database_admin.ListDatabasesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``databases`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatabases`` requests and continue to iterate + through the ``databases`` field on the + corresponding responses. + + All the usual :class:`~.spanner_database_admin.ListDatabasesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[spanner_database_admin.ListDatabasesResponse]], + request: spanner_database_admin.ListDatabasesRequest, + response: spanner_database_admin.ListDatabasesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_database_admin.ListDatabasesRequest`): + The initial request object. + response (:class:`~.spanner_database_admin.ListDatabasesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner_database_admin.ListDatabasesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[spanner_database_admin.ListDatabasesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[spanner_database_admin.Database]: + async def async_generator(): + async for page in self.pages: + for response in page.databases: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`~.backup.ListBackupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. 
+ + All the usual :class:`~.backup.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., backup.ListBackupsResponse], + request: backup.ListBackupsRequest, + response: backup.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.backup.ListBackupsRequest`): + The initial request object. + response (:class:`~.backup.ListBackupsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = backup.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[backup.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[backup.Backup]: + for page in self.pages: + yield from page.backups + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsAsyncPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`~.backup.ListBackupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`~.backup.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[backup.ListBackupsResponse]], + request: backup.ListBackupsRequest, + response: backup.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.backup.ListBackupsRequest`): + The initial request object. + response (:class:`~.backup.ListBackupsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
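+
+        Example (sketch; assumes ``async_client`` is a constructed
+        ``DatabaseAdminAsyncClient`` used inside a coroutine):
+
+            pager = await async_client.list_backups(parent=instance_name)
+            async for backup_pb in pager:  # pages resolve on demand
+                print(backup_pb.name)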
+ """ + self._method = method + self._request = backup.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[backup.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[backup.Backup]: + async def async_generator(): + async for page in self.pages: + for response in page.backups: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabaseOperationsPager: + """A pager for iterating through ``list_database_operations`` requests. + + This class thinly wraps an initial + :class:`~.spanner_database_admin.ListDatabaseOperationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``operations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatabaseOperations`` requests and continue to iterate + through the ``operations`` field on the + corresponding responses. + + All the usual :class:`~.spanner_database_admin.ListDatabaseOperationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., spanner_database_admin.ListDatabaseOperationsResponse], + request: spanner_database_admin.ListDatabaseOperationsRequest, + response: spanner_database_admin.ListDatabaseOperationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_database_admin.ListDatabaseOperationsRequest`): + The initial request object. + response (:class:`~.spanner_database_admin.ListDatabaseOperationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner_database_admin.ListDatabaseOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[spanner_database_admin.ListDatabaseOperationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[operations.Operation]: + for page in self.pages: + yield from page.operations + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListDatabaseOperationsAsyncPager: + """A pager for iterating through ``list_database_operations`` requests. + + This class thinly wraps an initial + :class:`~.spanner_database_admin.ListDatabaseOperationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``operations`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatabaseOperations`` requests and continue to iterate + through the ``operations`` field on the + corresponding responses. + + All the usual :class:`~.spanner_database_admin.ListDatabaseOperationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[spanner_database_admin.ListDatabaseOperationsResponse] + ], + request: spanner_database_admin.ListDatabaseOperationsRequest, + response: spanner_database_admin.ListDatabaseOperationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_database_admin.ListDatabaseOperationsRequest`): + The initial request object. + response (:class:`~.spanner_database_admin.ListDatabaseOperationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner_database_admin.ListDatabaseOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[spanner_database_admin.ListDatabaseOperationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[operations.Operation]: + async def async_generator(): + async for page in self.pages: + for response in page.operations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupOperationsPager: + """A pager for iterating through ``list_backup_operations`` requests. + + This class thinly wraps an initial + :class:`~.backup.ListBackupOperationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``operations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBackupOperations`` requests and continue to iterate + through the ``operations`` field on the + corresponding responses. + + All the usual :class:`~.backup.ListBackupOperationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., backup.ListBackupOperationsResponse], + request: backup.ListBackupOperationsRequest, + response: backup.ListBackupOperationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.backup.ListBackupOperationsRequest`): + The initial request object. + response (:class:`~.backup.ListBackupOperationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
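+
+        Example (sketch of explicit page-by-page iteration, one RPC per page;
+        ``client`` and ``instance_name`` are assumed as elsewhere):
+
+            pager = client.list_backup_operations(parent=instance_name)
+            for page in pager.pages:
+                for op in page.operations:
+                    print(op.name)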
+ """ + self._method = method + self._request = backup.ListBackupOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[backup.ListBackupOperationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[operations.Operation]: + for page in self.pages: + yield from page.operations + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupOperationsAsyncPager: + """A pager for iterating through ``list_backup_operations`` requests. + + This class thinly wraps an initial + :class:`~.backup.ListBackupOperationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``operations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBackupOperations`` requests and continue to iterate + through the ``operations`` field on the + corresponding responses. + + All the usual :class:`~.backup.ListBackupOperationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[backup.ListBackupOperationsResponse]], + request: backup.ListBackupOperationsRequest, + response: backup.ListBackupOperationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.backup.ListBackupOperationsRequest`): + The initial request object. + response (:class:`~.backup.ListBackupOperationsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = backup.ListBackupOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[backup.ListBackupOperationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[operations.Operation]: + async def async_generator(): + async for page in self.pages: + for response in page.operations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py new file mode 100644 index 0000000000..348af3f043 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import DatabaseAdminTransport +from .grpc import DatabaseAdminGrpcTransport +from .grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]] +_transport_registry["grpc"] = DatabaseAdminGrpcTransport +_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport + + +__all__ = ( + "DatabaseAdminTransport", + "DatabaseAdminGrpcTransport", + "DatabaseAdminGrpcAsyncIOTransport", +) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py new file mode 100644 index 0000000000..779f02e840 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            "google-cloud-spanner-admin-database",
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class DatabaseAdminTransport(abc.ABC):
+    """Abstract transport class for DatabaseAdmin."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/spanner.admin",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
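+        # (The wrapped methods created there carry the default retry and
+        # timeout policies listed below; callers may still override both via
+        # the per-call ``retry`` and ``timeout`` arguments on the client.)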
+ self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.list_databases: gapic_v1.method.wrap_method( + self.list_databases, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.create_database: gapic_v1.method.wrap_method( + self.create_database, default_timeout=3600.0, client_info=client_info, + ), + self.get_database: gapic_v1.method.wrap_method( + self.get_database, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.update_database_ddl: gapic_v1.method.wrap_method( + self.update_database_ddl, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.drop_database: gapic_v1.method.wrap_method( + self.drop_database, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.get_database_ddl: gapic_v1.method.wrap_method( + self.get_database_ddl, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=30.0, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=30.0, + client_info=client_info, + ), + self.create_backup: gapic_v1.method.wrap_method( + self.create_backup, default_timeout=3600.0, client_info=client_info, + ), + self.get_backup: gapic_v1.method.wrap_method( + self.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.update_backup: gapic_v1.method.wrap_method( + self.update_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.delete_backup: gapic_v1.method.wrap_method( + self.delete_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + 
default_timeout=3600.0, + client_info=client_info, + ), + self.list_backups: gapic_v1.method.wrap_method( + self.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.restore_database: gapic_v1.method.wrap_method( + self.restore_database, default_timeout=3600.0, client_info=client_info, + ), + self.list_database_operations: gapic_v1.method.wrap_method( + self.list_database_operations, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.list_backup_operations: gapic_v1.method.wrap_method( + self.list_backup_operations, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def list_databases( + self, + ) -> typing.Callable[ + [spanner_database_admin.ListDatabasesRequest], + typing.Union[ + spanner_database_admin.ListDatabasesResponse, + typing.Awaitable[spanner_database_admin.ListDatabasesResponse], + ], + ]: + raise NotImplementedError() + + @property + def create_database( + self, + ) -> typing.Callable[ + [spanner_database_admin.CreateDatabaseRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_database( + self, + ) -> typing.Callable[ + [spanner_database_admin.GetDatabaseRequest], + typing.Union[ + spanner_database_admin.Database, + typing.Awaitable[spanner_database_admin.Database], + ], + ]: + raise NotImplementedError() + + @property + def update_database_ddl( + self, + ) -> typing.Callable[ + [spanner_database_admin.UpdateDatabaseDdlRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def drop_database( + self, + ) -> typing.Callable[ + [spanner_database_admin.DropDatabaseRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_database_ddl( + self, + ) -> typing.Callable[ + [spanner_database_admin.GetDatabaseDdlRequest], + typing.Union[ + spanner_database_admin.GetDatabaseDdlResponse, + typing.Awaitable[spanner_database_admin.GetDatabaseDdlResponse], + ], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + [iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise 
NotImplementedError() + + @property + def create_backup( + self, + ) -> typing.Callable[ + [gsad_backup.CreateBackupRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_backup( + self, + ) -> typing.Callable[ + [backup.GetBackupRequest], + typing.Union[backup.Backup, typing.Awaitable[backup.Backup]], + ]: + raise NotImplementedError() + + @property + def update_backup( + self, + ) -> typing.Callable[ + [gsad_backup.UpdateBackupRequest], + typing.Union[gsad_backup.Backup, typing.Awaitable[gsad_backup.Backup]], + ]: + raise NotImplementedError() + + @property + def delete_backup( + self, + ) -> typing.Callable[ + [backup.DeleteBackupRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_backups( + self, + ) -> typing.Callable[ + [backup.ListBackupsRequest], + typing.Union[ + backup.ListBackupsResponse, typing.Awaitable[backup.ListBackupsResponse] + ], + ]: + raise NotImplementedError() + + @property + def restore_database( + self, + ) -> typing.Callable[ + [spanner_database_admin.RestoreDatabaseRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def list_database_operations( + self, + ) -> typing.Callable[ + [spanner_database_admin.ListDatabaseOperationsRequest], + typing.Union[ + spanner_database_admin.ListDatabaseOperationsResponse, + typing.Awaitable[spanner_database_admin.ListDatabaseOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_backup_operations( + self, + ) -> typing.Callable[ + [backup.ListBackupOperationsRequest], + typing.Union[ + backup.ListBackupOperationsResponse, + typing.Awaitable[backup.ListBackupOperationsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("DatabaseAdminTransport",) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py new file mode 100644 index 0000000000..0f8d56f05a --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -0,0 +1,817 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+
+
+class DatabaseAdminGrpcTransport(DatabaseAdminTransport):
+    """gRPC backend transport for DatabaseAdmin.
+
+    Cloud Spanner Database Admin API
+    The Cloud Spanner Database Admin API can be used to create,
+    drop, and list databases. It also enables updating the schema of
+    pre-existing databases. It can also be used to create, delete
+    and list backups for a database and to restore from an existing
+    backup.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format.
It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # Create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if "operations_client" not in self.__dict__:
+            self.__dict__["operations_client"] = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self.__dict__["operations_client"]
+
+    @property
+    def list_databases(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.ListDatabasesRequest],
+        spanner_database_admin.ListDatabasesResponse,
+    ]:
+        r"""Return a callable for the list databases method over gRPC.
+
+        Lists Cloud Spanner databases.
+
+        Returns:
+            Callable[[~.ListDatabasesRequest],
+                    ~.ListDatabasesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_databases" not in self._stubs:
+            self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
+                request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
+                response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
+            )
+        return self._stubs["list_databases"]
+
+    @property
+    def create_database(
+        self,
+    ) -> Callable[[spanner_database_admin.CreateDatabaseRequest], operations.Operation]:
+        r"""Return a callable for the create database method over gRPC.
+
+        Creates a new Cloud Spanner database and starts to prepare it
+        for serving. The returned [long-running
+        operation][google.longrunning.Operation] will have a name of the
+        format ``/operations/`` and can be
+        used to track preparation of the database. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+        The [response][google.longrunning.Operation.response] field type
+        is [Database][google.spanner.admin.database.v1.Database], if
+        successful.
+
+        Returns:
+            Callable[[~.CreateDatabaseRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_database" not in self._stubs: + self._stubs["create_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_database"] + + @property + def get_database( + self, + ) -> Callable[ + [spanner_database_admin.GetDatabaseRequest], spanner_database_admin.Database + ]: + r"""Return a callable for the get database method over gRPC. + + Gets the state of a Cloud Spanner database. + + Returns: + Callable[[~.GetDatabaseRequest], + ~.Database]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_database" not in self._stubs: + self._stubs["get_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + request_serializer=spanner_database_admin.GetDatabaseRequest.serialize, + response_deserializer=spanner_database_admin.Database.deserialize, + ) + return self._stubs["get_database"] + + @property + def update_database_ddl( + self, + ) -> Callable[ + [spanner_database_admin.UpdateDatabaseDdlRequest], operations.Operation + ]: + r"""Return a callable for the update database ddl method over gRPC. + + Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. The + returned [long-running operation][google.longrunning.Operation] + will have a name of the format + ``/operations/`` and can be used to + track execution of the schema change(s). The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + + Returns: + Callable[[~.UpdateDatabaseDdlRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_database_ddl" not in self._stubs: + self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", + request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_database_ddl"] + + @property + def drop_database( + self, + ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty.Empty]: + r"""Return a callable for the drop database method over gRPC. + + Drops (aka deletes) a Cloud Spanner database. Completed backups + for the database will be retained according to their + ``expire_time``. + + Returns: + Callable[[~.DropDatabaseRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "drop_database" not in self._stubs:
+            self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
+                request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
+                response_deserializer=empty.Empty.FromString,
+            )
+        return self._stubs["drop_database"]
+
+    @property
+    def get_database_ddl(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.GetDatabaseDdlRequest],
+        spanner_database_admin.GetDatabaseDdlResponse,
+    ]:
+        r"""Return a callable for the get database ddl method over gRPC.
+
+        Returns the schema of a Cloud Spanner database as a list of
+        formatted DDL statements. This method does not show pending
+        schema updates; those may be queried using the
+        [Operations][google.longrunning.Operations] API.
+
+        Returns:
+            Callable[[~.GetDatabaseDdlRequest],
+                    ~.GetDatabaseDdlResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_database_ddl" not in self._stubs:
+            self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
+                request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
+                response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
+            )
+        return self._stubs["get_database_ddl"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+
+        Sets the access control policy on a database or backup resource.
+        Replaces any existing policy.
+
+        Authorization requires ``spanner.databases.setIamPolicy``
+        permission on
+        [resource][google.iam.v1.SetIamPolicyRequest.resource]. For
+        backups, authorization requires ``spanner.backups.setIamPolicy``
+        permission on
+        [resource][google.iam.v1.SetIamPolicyRequest.resource].
+
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "set_iam_policy" not in self._stubs:
+            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
+                request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString,
+                response_deserializer=policy.Policy.FromString,
+            )
+        return self._stubs["set_iam_policy"]
+
+    @property
+    def get_iam_policy(
+        self,
+    ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]:
+        r"""Return a callable for the get iam policy method over gRPC.
+
+        Gets the access control policy for a database or backup
+        resource. Returns an empty policy if a database or backup exists
+        but does not have a policy set.
+
+        Authorization requires ``spanner.databases.getIamPolicy``
+        permission on
+        [resource][google.iam.v1.GetIamPolicyRequest.resource]. For
+        backups, authorization requires ``spanner.backups.getIamPolicy``
+        permission on
+        [resource][google.iam.v1.GetIamPolicyRequest.resource].
+
+        Returns:
+            Callable[[~.GetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
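+
+        Example (an illustrative sketch, not part of the generated
+        surface; most callers go through ``DatabaseAdminClient`` rather
+        than invoking the transport directly, the resource name below is
+        hypothetical, and application default credentials are assumed):
+
+        .. code-block:: python
+
+            from google.iam.v1 import iam_policy_pb2
+
+            from google.cloud.spanner_admin_database_v1.services.database_admin.transports.grpc import (
+                DatabaseAdminGrpcTransport,
+            )
+
+            transport = DatabaseAdminGrpcTransport()
+            request = iam_policy_pb2.GetIamPolicyRequest(
+                resource="projects/my-project/instances/my-instance/databases/my-db",
+            )
+            # The property returns the raw gRPC stub callable; invoking
+            # it performs the unary-unary RPC.
+            current_policy = transport.get_iam_policy(request)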
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the specified + database or backup resource. + + Attempting this RPC on a non-existent Cloud Spanner database + will result in a NOT_FOUND error if the user has + ``spanner.databases.list`` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will result + in a NOT_FOUND error if the user has ``spanner.backups.list`` + permission on the containing instance. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def create_backup( + self, + ) -> Callable[[gsad_backup.CreateBackupRequest], operations.Operation]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Spanner Backup. The returned backup + [long-running operation][google.longrunning.Operation] will have + a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.spanner.admin.database.v1.Backup], if + successful. Cancelling the returned operation will stop the + creation and delete the backup. There can be only one pending + backup creation per database. Backup creation of different + databases can run concurrently. + + Returns: + Callable[[~.CreateBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", + request_serializer=gsad_backup.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Returns: + Callable[[~.GetBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", + request_serializer=backup.GetBackupRequest.serialize, + response_deserializer=backup.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[gsad_backup.UpdateBackupRequest], gsad_backup.Backup]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Returns: + Callable[[~.UpdateBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", + request_serializer=gsad_backup.UpdateBackupRequest.serialize, + response_deserializer=gsad_backup.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty.Empty]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Returns: + Callable[[~.DeleteBackupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", + request_serializer=backup.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[[backup.ListBackupsRequest], backup.ListBackupsResponse]: + r"""Return a callable for the list backups method over gRPC. + + Lists completed and pending backups. Backups returned are + ordered by ``create_time`` in descending order, starting from + the most recent ``create_time``. 
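+
+        Example of driving the page-token loop by hand with the raw
+        callable (an illustrative sketch; ``transport`` is assumed to be
+        an already-constructed ``DatabaseAdminGrpcTransport`` and the
+        parent resource name is hypothetical):
+
+        .. code-block:: python
+
+            from google.cloud.spanner_admin_database_v1.types import backup
+
+            request = backup.ListBackupsRequest(
+                parent="projects/my-project/instances/my-instance",
+            )
+            while True:
+                response = transport.list_backups(request)
+                for item in response.backups:
+                    print(item.name)
+                if not response.next_page_token:
+                    break
+                request.page_token = response.next_page_token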
+ + Returns: + Callable[[~.ListBackupsRequest], + ~.ListBackupsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", + request_serializer=backup.ListBackupsRequest.serialize, + response_deserializer=backup.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_database( + self, + ) -> Callable[ + [spanner_database_admin.RestoreDatabaseRequest], operations.Operation + ]: + r"""Return a callable for the restore database method over gRPC. + + Create a new database by restoring from a completed backup. The + new database must be in the same project and in an instance with + the same instance configuration as the instance containing the + backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the + format + ``projects//instances//databases//operations/``, + and can be used to track the progress of the operation, and to + cancel it. The [metadata][google.longrunning.Operation.metadata] + field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the + restore and delete the database. There can be only one database + being restored into an instance at a time. Once the restore + operation completes, a new restore operation can be initiated, + without waiting for the optimize operation associated with the + first restore to complete. + + Returns: + Callable[[~.RestoreDatabaseRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_database" not in self._stubs: + self._stubs["restore_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", + request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_database"] + + @property + def list_database_operations( + self, + ) -> Callable[ + [spanner_database_admin.ListDatabaseOperationsRequest], + spanner_database_admin.ListDatabaseOperationsResponse, + ]: + r"""Return a callable for the list database operations method over gRPC. + + Lists database + [longrunning-operations][google.longrunning.Operation]. A + database operation has a name of the form + ``projects//instances//databases//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. + + Returns: + Callable[[~.ListDatabaseOperationsRequest], + ~.ListDatabaseOperationsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
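+
+        Example (an illustrative sketch; ``transport`` is assumed to be
+        an already-constructed ``DatabaseAdminGrpcTransport``, and the
+        parent and filter values are hypothetical):
+
+        .. code-block:: python
+
+            from google.cloud.spanner_admin_database_v1.types import (
+                spanner_database_admin,
+            )
+
+            request = spanner_database_admin.ListDatabaseOperationsRequest(
+                parent="projects/my-project/instances/my-instance",
+                filter="(metadata.@type:type.googleapis.com/"
+                "google.spanner.admin.database.v1.RestoreDatabaseMetadata)",
+            )
+            response = transport.list_database_operations(request)
+            for operation in response.operations:
+                print(operation.name, operation.done)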
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_database_operations" not in self._stubs: + self._stubs["list_database_operations"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", + request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize, + response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize, + ) + return self._stubs["list_database_operations"] + + @property + def list_backup_operations( + self, + ) -> Callable[ + [backup.ListBackupOperationsRequest], backup.ListBackupOperationsResponse + ]: + r"""Return a callable for the list backup operations method over gRPC. + + Lists the backup [long-running + operations][google.longrunning.Operation] in the given instance. + A backup operation has a name of the form + ``projects//instances//backups//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending + order starting from the most recently started operation. + + Returns: + Callable[[~.ListBackupOperationsRequest], + ~.ListBackupOperationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backup_operations" not in self._stubs: + self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", + request_serializer=backup.ListBackupOperationsRequest.serialize, + response_deserializer=backup.ListBackupOperationsResponse.deserialize, + ) + return self._stubs["list_backup_operations"] + + +__all__ = ("DatabaseAdminGrpcTransport",) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py new file mode 100644 index 0000000000..45f2e2d9e6 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -0,0 +1,831 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+from .grpc import DatabaseAdminGrpcTransport
+
+
+class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
+    """gRPC AsyncIO backend transport for DatabaseAdmin.
+
+    Cloud Spanner Database Admin API
+    The Cloud Spanner Database Admin API can be used to create,
+    drop, and list databases. It also enables updating the schema of
+    pre-existing databases. It can also be used to create, delete,
+    and list backups for a database and to restore from an existing
+    backup.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
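+
+        Example (an illustrative sketch, not part of the generated
+        surface; it assumes application default credentials are
+        available in the environment):
+
+        .. code-block:: python
+
+            from google.cloud.spanner_admin_database_v1.services.database_admin.transports.grpc_asyncio import (
+                DatabaseAdminGrpcAsyncIOTransport,
+            )
+
+            channel = DatabaseAdminGrpcAsyncIOTransport.create_channel(
+                "spanner.googleapis.com:443"
+            )
+            transport = DatabaseAdminGrpcAsyncIOTransport(channel=channel)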
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+ if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def list_databases( + self, + ) -> Callable[ + [spanner_database_admin.ListDatabasesRequest], + Awaitable[spanner_database_admin.ListDatabasesResponse], + ]: + r"""Return a callable for the list databases method over gRPC. + + Lists Cloud Spanner databases. + + Returns: + Callable[[~.ListDatabasesRequest], + Awaitable[~.ListDatabasesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_databases" not in self._stubs: + self._stubs["list_databases"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", + request_serializer=spanner_database_admin.ListDatabasesRequest.serialize, + response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize, + ) + return self._stubs["list_databases"] + + @property + def create_database( + self, + ) -> Callable[ + [spanner_database_admin.CreateDatabaseRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create database method over gRPC. + + Creates a new Cloud Spanner database and starts to prepare it + for serving. The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track preparation of the database. 
The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Database][google.spanner.admin.database.v1.Database], if + successful. + + Returns: + Callable[[~.CreateDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_database" not in self._stubs: + self._stubs["create_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_database"] + + @property + def get_database( + self, + ) -> Callable[ + [spanner_database_admin.GetDatabaseRequest], + Awaitable[spanner_database_admin.Database], + ]: + r"""Return a callable for the get database method over gRPC. + + Gets the state of a Cloud Spanner database. + + Returns: + Callable[[~.GetDatabaseRequest], + Awaitable[~.Database]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_database" not in self._stubs: + self._stubs["get_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + request_serializer=spanner_database_admin.GetDatabaseRequest.serialize, + response_deserializer=spanner_database_admin.Database.deserialize, + ) + return self._stubs["get_database"] + + @property + def update_database_ddl( + self, + ) -> Callable[ + [spanner_database_admin.UpdateDatabaseDdlRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the update database ddl method over gRPC. + + Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. The + returned [long-running operation][google.longrunning.Operation] + will have a name of the format + ``/operations/`` and can be used to + track execution of the schema change(s). The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + + Returns: + Callable[[~.UpdateDatabaseDdlRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "update_database_ddl" not in self._stubs:
+            self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
+                request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
+                response_deserializer=operations.Operation.FromString,
+            )
+        return self._stubs["update_database_ddl"]
+
+    @property
+    def drop_database(
+        self,
+    ) -> Callable[[spanner_database_admin.DropDatabaseRequest], Awaitable[empty.Empty]]:
+        r"""Return a callable for the drop database method over gRPC.
+
+        Drops (aka deletes) a Cloud Spanner database. Completed backups
+        for the database will be retained according to their
+        ``expire_time``.
+
+        Returns:
+            Callable[[~.DropDatabaseRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "drop_database" not in self._stubs:
+            self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
+                request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
+                response_deserializer=empty.Empty.FromString,
+            )
+        return self._stubs["drop_database"]
+
+    @property
+    def get_database_ddl(
+        self,
+    ) -> Callable[
+        [spanner_database_admin.GetDatabaseDdlRequest],
+        Awaitable[spanner_database_admin.GetDatabaseDdlResponse],
+    ]:
+        r"""Return a callable for the get database ddl method over gRPC.
+
+        Returns the schema of a Cloud Spanner database as a list of
+        formatted DDL statements. This method does not show pending
+        schema updates; those may be queried using the
+        [Operations][google.longrunning.Operations] API.
+
+        Returns:
+            Callable[[~.GetDatabaseDdlRequest],
+                    Awaitable[~.GetDatabaseDdlResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_database_ddl" not in self._stubs:
+            self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+                "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
+                request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
+                response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
+            )
+        return self._stubs["get_database_ddl"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]:
+        r"""Return a callable for the set iam policy method over gRPC.
+
+        Sets the access control policy on a database or backup resource.
+        Replaces any existing policy.
+
+        Authorization requires ``spanner.databases.setIamPolicy``
+        permission on
+        [resource][google.iam.v1.SetIamPolicyRequest.resource]. For
+        backups, authorization requires ``spanner.backups.setIamPolicy``
+        permission on
+        [resource][google.iam.v1.SetIamPolicyRequest.resource].
+
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    Awaitable[~.Policy]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a database or backup + resource. Returns an empty policy if a database or backup exists + but does not have a policy set. + + Authorization requires ``spanner.databases.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. For + backups, authorization requires ``spanner.backups.getIamPolicy`` + permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the specified + database or backup resource. + + Attempting this RPC on a non-existent Cloud Spanner database + will result in a NOT_FOUND error if the user has + ``spanner.databases.list`` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will result + in a NOT_FOUND error if the user has ``spanner.backups.list`` + permission on the containing instance. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def create_backup( + self, + ) -> Callable[[gsad_backup.CreateBackupRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Spanner Backup. 
The returned backup + [long-running operation][google.longrunning.Operation] will have + a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.spanner.admin.database.v1.Backup], if + successful. Cancelling the returned operation will stop the + creation and delete the backup. There can be only one pending + backup creation per database. Backup creation of different + databases can run concurrently. + + Returns: + Callable[[~.CreateBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", + request_serializer=gsad_backup.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup( + self, + ) -> Callable[[backup.GetBackupRequest], Awaitable[backup.Backup]]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Returns: + Callable[[~.GetBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", + request_serializer=backup.GetBackupRequest.serialize, + response_deserializer=backup.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[gsad_backup.UpdateBackupRequest], Awaitable[gsad_backup.Backup]]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + + Returns: + Callable[[~.UpdateBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", + request_serializer=gsad_backup.UpdateBackupRequest.serialize, + response_deserializer=gsad_backup.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup( + self, + ) -> Callable[[backup.DeleteBackupRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. 
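+
+        Example of awaiting the raw AsyncIO callable (an illustrative
+        sketch; ``transport`` is assumed to be an already-constructed
+        ``DatabaseAdminGrpcAsyncIOTransport`` and the backup name is
+        hypothetical):
+
+        .. code-block:: python
+
+            from google.cloud.spanner_admin_database_v1.types import backup
+
+            async def remove_backup(transport):
+                request = backup.DeleteBackupRequest(
+                    name="projects/my-project/instances/my-instance/backups/my-backup",
+                )
+                # The AsyncIO stub returns an awaitable; DeleteBackup has
+                # an empty response.
+                await transport.delete_backup(request)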
+ + Returns: + Callable[[~.DeleteBackupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", + request_serializer=backup.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[[backup.ListBackupsRequest], Awaitable[backup.ListBackupsResponse]]: + r"""Return a callable for the list backups method over gRPC. + + Lists completed and pending backups. Backups returned are + ordered by ``create_time`` in descending order, starting from + the most recent ``create_time``. + + Returns: + Callable[[~.ListBackupsRequest], + Awaitable[~.ListBackupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", + request_serializer=backup.ListBackupsRequest.serialize, + response_deserializer=backup.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_database( + self, + ) -> Callable[ + [spanner_database_admin.RestoreDatabaseRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the restore database method over gRPC. + + Create a new database by restoring from a completed backup. The + new database must be in the same project and in an instance with + the same instance configuration as the instance containing the + backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the + format + ``projects//instances//databases//operations/``, + and can be used to track the progress of the operation, and to + cancel it. The [metadata][google.longrunning.Operation.metadata] + field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type is + [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the + restore and delete the database. There can be only one database + being restored into an instance at a time. Once the restore + operation completes, a new restore operation can be initiated, + without waiting for the optimize operation associated with the + first restore to complete. + + Returns: + Callable[[~.RestoreDatabaseRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "restore_database" not in self._stubs: + self._stubs["restore_database"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", + request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_database"] + + @property + def list_database_operations( + self, + ) -> Callable[ + [spanner_database_admin.ListDatabaseOperationsRequest], + Awaitable[spanner_database_admin.ListDatabaseOperationsResponse], + ]: + r"""Return a callable for the list database operations method over gRPC. + + Lists database + [longrunning-operations][google.longrunning.Operation]. A + database operation has a name of the form + ``projects//instances//databases//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. + + Returns: + Callable[[~.ListDatabaseOperationsRequest], + Awaitable[~.ListDatabaseOperationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_database_operations" not in self._stubs: + self._stubs["list_database_operations"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", + request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize, + response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize, + ) + return self._stubs["list_database_operations"] + + @property + def list_backup_operations( + self, + ) -> Callable[ + [backup.ListBackupOperationsRequest], + Awaitable[backup.ListBackupOperationsResponse], + ]: + r"""Return a callable for the list backup operations method over gRPC. + + Lists the backup [long-running + operations][google.longrunning.Operation] in the given instance. + A backup operation has a name of the form + ``projects//instances//backups//operations/``. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + Operations returned include those that have + completed/failed/canceled within the last 7 days, and pending + operations. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending + order starting from the most recently started operation. + + Returns: + Callable[[~.ListBackupOperationsRequest], + Awaitable[~.ListBackupOperationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_backup_operations" not in self._stubs: + self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", + request_serializer=backup.ListBackupOperationsRequest.serialize, + response_deserializer=backup.ListBackupOperationsResponse.deserialize, + ) + return self._stubs["list_backup_operations"] + + +__all__ = ("DatabaseAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/spanner_admin_database_v1/types.py b/google/cloud/spanner_admin_database_v1/types.py deleted file mode 100644 index 43103a0b6d..0000000000 --- a/google/cloud/spanner_admin_database_v1/types.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.iam.v1.logging import audit_data_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 - - -_shared_modules = [ - http_pb2, - iam_policy_pb2, - policy_pb2, - audit_data_pb2, - operations_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [spanner_database_admin_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_admin_database_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py new file mode 100644 index 0000000000..d02a26ffb5 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/types/__init__.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from .common import OperationProgress
+from .backup import (
+    Backup,
+    CreateBackupRequest,
+    CreateBackupMetadata,
+    UpdateBackupRequest,
+    GetBackupRequest,
+    DeleteBackupRequest,
+    ListBackupsRequest,
+    ListBackupsResponse,
+    ListBackupOperationsRequest,
+    ListBackupOperationsResponse,
+    BackupInfo,
+)
+from .spanner_database_admin import (
+    RestoreInfo,
+    Database,
+    ListDatabasesRequest,
+    ListDatabasesResponse,
+    CreateDatabaseRequest,
+    CreateDatabaseMetadata,
+    GetDatabaseRequest,
+    UpdateDatabaseDdlRequest,
+    UpdateDatabaseDdlMetadata,
+    DropDatabaseRequest,
+    GetDatabaseDdlRequest,
+    GetDatabaseDdlResponse,
+    ListDatabaseOperationsRequest,
+    ListDatabaseOperationsResponse,
+    RestoreDatabaseRequest,
+    RestoreDatabaseMetadata,
+    OptimizeRestoredDatabaseMetadata,
+)
+
+
+__all__ = (
+    "OperationProgress",
+    "Backup",
+    "CreateBackupRequest",
+    "CreateBackupMetadata",
+    "UpdateBackupRequest",
+    "GetBackupRequest",
+    "DeleteBackupRequest",
+    "ListBackupsRequest",
+    "ListBackupsResponse",
+    "ListBackupOperationsRequest",
+    "ListBackupOperationsResponse",
+    "BackupInfo",
+    "RestoreInfo",
+    "Database",
+    "ListDatabasesRequest",
+    "ListDatabasesResponse",
+    "CreateDatabaseRequest",
+    "CreateDatabaseMetadata",
+    "GetDatabaseRequest",
+    "UpdateDatabaseDdlRequest",
+    "UpdateDatabaseDdlMetadata",
+    "DropDatabaseRequest",
+    "GetDatabaseDdlRequest",
+    "GetDatabaseDdlResponse",
+    "ListDatabaseOperationsRequest",
+    "ListDatabaseOperationsResponse",
+    "RestoreDatabaseRequest",
+    "RestoreDatabaseMetadata",
+    "OptimizeRestoredDatabaseMetadata",
+)
diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py
new file mode 100644
index 0000000000..4ab6237f04
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/types/backup.py
@@ -0,0 +1,480 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.spanner_admin_database_v1.types import common
+from google.longrunning import operations_pb2 as gl_operations  # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.spanner.admin.database.v1",
+    manifest={
+        "Backup",
+        "CreateBackupRequest",
+        "CreateBackupMetadata",
+        "UpdateBackupRequest",
+        "GetBackupRequest",
+        "DeleteBackupRequest",
+        "ListBackupsRequest",
+        "ListBackupsResponse",
+        "ListBackupOperationsRequest",
+        "ListBackupOperationsResponse",
+        "BackupInfo",
+    },
+)
+
+
+class Backup(proto.Message):
+    r"""A backup of a Cloud Spanner database.
+
+    Attributes:
+        database (str):
+            Required for the
+            [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+            operation. Name of the database from which this backup was
+            created. This needs to be in the same instance as the
+            backup. Values are of the form
+            ``projects/<project>/instances/<instance>/databases/<database>``.
+        expire_time (~.timestamp.Timestamp):
+            Required for the
+            [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+            operation. The expiration time of the backup, with
+            microseconds granularity that must be at least 6 hours and
+            at most 366 days from the time the CreateBackup request is
+            processed. Once the ``expire_time`` has passed, the backup
+            is eligible to be automatically deleted by Cloud Spanner to
+            free the resources used by the backup.
+        name (str):
+            Output only for the
+            [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+            operation. Required for the
+            [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
+            operation.
+
+            A globally unique identifier for the backup which cannot be
+            changed. Values are of the form
+            ``projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]``
+            The final segment of the name must be between 2 and 60
+            characters in length.
+
+            The backup is stored in the location(s) specified in the
+            instance configuration of the instance containing the
+            backup, identified by the prefix of the backup name of the
+            form ``projects/<project>/instances/<instance>``.
+        create_time (~.timestamp.Timestamp):
+            Output only. The backup will contain an externally
+            consistent copy of the database at the timestamp specified
+            by ``create_time``. ``create_time`` is approximately the
+            time the
+            [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+            request is received.
+        size_bytes (int):
+            Output only. Size of the backup in bytes.
+        state (~.gsad_backup.Backup.State):
+            Output only. The current state of the backup.
+        referencing_databases (Sequence[str]):
+            Output only. The names of the restored databases that
+            reference the backup. The database names are of the form
+            ``projects/<project>/instances/<instance>/databases/<database>``.
+            Referencing databases may exist in different instances. The
+            existence of any referencing database prevents the backup
+            from being deleted. When a restored database from the backup
+            enters the ``READY`` state, the reference to the backup is
+            removed.
+    """
+
+    class State(proto.Enum):
+        r"""Indicates the current state of the backup."""
+        STATE_UNSPECIFIED = 0
+        CREATING = 1
+        READY = 2
+
+    database = proto.Field(proto.STRING, number=2)
+
+    expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+    name = proto.Field(proto.STRING, number=1)
+
+    create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+    size_bytes = proto.Field(proto.INT64, number=5)
+
+    state = proto.Field(proto.ENUM, number=6, enum=State,)
+
+    referencing_databases = proto.RepeatedField(proto.STRING, number=7)
+
+
+class CreateBackupRequest(proto.Message):
+    r"""The request for
+    [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+
+    Attributes:
+        parent (str):
+            Required. The name of the instance in which the backup will
+            be created. This must be the same instance that contains the
+            database the backup will be created from. The backup will be
+            stored in the location(s) specified in the instance
+            configuration of this instance. Values are of the form
+            ``projects/<project>/instances/<instance>``.
+        backup_id (str):
+            Required. The id of the backup to be created. The
+            ``backup_id`` appended to ``parent`` forms the full backup
+            name of the form
+            ``projects/<project>/instances/<instance>/backups/<backup_id>``.
+        backup (~.gsad_backup.Backup):
+            Required. The backup to create.
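+
+    Example:
+        A minimal usage sketch (an editorial illustration, not generated
+        API documentation; the project, instance, database, and backup
+        names below are placeholders, and the call assumes default
+        credentials are available)::
+
+            from google.cloud.spanner_admin_database_v1 import (
+                Backup,
+                CreateBackupRequest,
+                DatabaseAdminClient,
+            )
+            from google.protobuf import timestamp_pb2
+
+            client = DatabaseAdminClient()
+            request = CreateBackupRequest(
+                parent="projects/my-project/instances/my-instance",
+                backup_id="my-backup",
+                backup=Backup(
+                    database="projects/my-project/instances/my-instance/databases/my-db",
+                    expire_time=timestamp_pb2.Timestamp(seconds=1735689600),
+                ),
+            )
+            # create_backup returns a long-running operation; result()
+            # blocks until the backup reaches the READY state.
+            operation = client.create_backup(request=request)
+            backup = operation.result()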
+ """ + + parent = proto.Field(proto.STRING, number=1) + + backup_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.MESSAGE, number=3, message="Backup",) + + +class CreateBackupMetadata(proto.Message): + r"""Metadata type for the operation returned by + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. + + Attributes: + name (str): + The name of the backup being created. + database (str): + The name of the database the backup is + created from. + progress (~.common.OperationProgress): + The progress of the + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + operation. + cancel_time (~.timestamp.Timestamp): + The time at which cancellation of this operation was + received. + [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + starts asynchronous cancellation on a long-running + operation. The server makes a best effort to cancel the + operation, but success is not guaranteed. Clients can use + [Operations.GetOperation][google.longrunning.Operations.GetOperation] + or other methods to check whether the cancellation succeeded + or whether the operation completed despite cancellation. On + successful cancellation, the operation is not deleted; + instead, it becomes an operation with an [Operation.error][] + value with a + [google.rpc.Status.code][google.rpc.Status.code] of 1, + corresponding to ``Code.CANCELLED``. + """ + + name = proto.Field(proto.STRING, number=1) + + database = proto.Field(proto.STRING, number=2) + + progress = proto.Field(proto.MESSAGE, number=3, message=common.OperationProgress,) + + cancel_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class UpdateBackupRequest(proto.Message): + r"""The request for + [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. + + Attributes: + backup (~.gsad_backup.Backup): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: + + - ``backup.expire_time``. + update_mask (~.field_mask.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; + this prevents any future fields from being erased + accidentally by clients that do not know about them. + """ + + backup = proto.Field(proto.MESSAGE, number=1, message="Backup",) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class GetBackupRequest(proto.Message): + r"""The request for + [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. + + Attributes: + name (str): + Required. Name of the backup. Values are of the form + ``projects//instances//backups/``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class DeleteBackupRequest(proto.Message): + r"""The request for + [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. + + Attributes: + name (str): + Required. Name of the backup to delete. Values are of the + form + ``projects//instances//backups/``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListBackupsRequest(proto.Message): + r"""The request for + [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. + + Attributes: + parent (str): + Required. 
+            The instance to list backups from. Values are of the form
+            ``projects/<project>/instances/<instance>``.
+        filter (str):
+            An expression that filters the list of returned backups.
+
+            A filter expression consists of a field name, a comparison
+            operator, and a value for filtering. The value must be a
+            string, a number, or a boolean. The comparison operator must
+            be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or
+            ``:``. Colon ``:`` is the contains operator. Filter rules
+            are not case sensitive.
+
+            The following fields in the
+            [Backup][google.spanner.admin.database.v1.Backup] are
+            eligible for filtering:
+
+            -  ``name``
+            -  ``database``
+            -  ``state``
+            -  ``create_time`` (and values are of the format
+               YYYY-MM-DDTHH:MM:SSZ)
+            -  ``expire_time`` (and values are of the format
+               YYYY-MM-DDTHH:MM:SSZ)
+            -  ``size_bytes``
+
+            You can combine multiple expressions by enclosing each
+            expression in parentheses. By default, expressions are
+            combined with AND logic, but you can specify AND, OR, and
+            NOT logic explicitly.
+
+            Here are a few examples:
+
+            -  ``name:Howl`` - The backup's name contains the string
+               "howl".
+            -  ``database:prod`` - The database's name contains the
+               string "prod".
+            -  ``state:CREATING`` - The backup is pending creation.
+            -  ``state:READY`` - The backup is fully created and ready
+               for use.
+            -  ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")``
+               - The backup name contains the string "howl" and
+               ``create_time`` of the backup is before
+               2018-03-28T14:50:00Z.
+            -  ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
+               ``expire_time`` is before 2018-03-28T14:50:00Z.
+            -  ``size_bytes > 10000000000`` - The backup's size is
+               greater than 10GB
+        page_size (int):
+            Number of backups to be returned in the
+            response. If 0 or less, defaults to the server's
+            maximum allowed page size.
+        page_token (str):
+            If non-empty, ``page_token`` should contain a
+            [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
+            from a previous
+            [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
+            to the same ``parent`` and with the same ``filter``.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+    page_token = proto.Field(proto.STRING, number=4)
+
+
+class ListBackupsResponse(proto.Message):
+    r"""The response for
+    [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+
+    Attributes:
+        backups (Sequence[~.gsad_backup.Backup]):
+            The list of matching backups. Backups returned are ordered
+            by ``create_time`` in descending order, starting from the
+            most recent ``create_time``.
+        next_page_token (str):
+            ``next_page_token`` can be sent in a subsequent
+            [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
+            call to fetch more of the matching backups.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    backups = proto.RepeatedField(proto.MESSAGE, number=1, message="Backup",)
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class ListBackupOperationsRequest(proto.Message):
+    r"""The request for
+    [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+    Attributes:
+        parent (str):
+            Required. The instance of the backup operations. Values are
+            of the form ``projects/<project>/instances/<instance>``.
+        filter (str):
+            An expression that filters the list of returned backup
+            operations.
+
+            A filter expression consists of a field name, a comparison
+            operator, and a value for filtering. The value must be a
+            string, a number, or a boolean. The comparison operator must
+            be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or
+            ``:``. Colon ``:`` is the contains operator. Filter rules
+            are not case sensitive.
+
+            The following fields in the
+            [operation][google.longrunning.Operation] are eligible for
+            filtering:
+
+            -  ``name`` - The name of the long-running operation
+            -  ``done`` - False if the operation is in progress, else
+               true.
+            -  ``metadata.@type`` - the type of metadata. For example,
+               the type string for
+               [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+               is
+               ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``.
+            -  ``metadata.<field_name>`` - any field in metadata.value.
+            -  ``error`` - Error associated with the long-running
+               operation.
+            -  ``response.@type`` - the type of response.
+            -  ``response.<field_name>`` - any field in response.value.
+
+            You can combine multiple expressions by enclosing each
+            expression in parentheses. By default, expressions are
+            combined with AND logic, but you can specify AND, OR, and
+            NOT logic explicitly.
+
+            Here are a few examples:
+
+            -  ``done:true`` - The operation is complete.
+            -  ``metadata.database:prod`` - The database the backup was
+               taken from has a name containing the string "prod".
+            -  ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+               ``(metadata.name:howl) AND``
+               ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
+               ``(error:*)`` - Returns operations where:
+
+               -  The operation's metadata type is
+                  [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+               -  The backup name contains the string "howl".
+               -  The operation started before 2018-03-28T14:50:00Z.
+               -  The operation resulted in an error.
+        page_size (int):
+            Number of operations to be returned in the
+            response. If 0 or less, defaults to the server's
+            maximum allowed page size.
+        page_token (str):
+            If non-empty, ``page_token`` should contain a
+            [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
+            from a previous
+            [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
+            to the same ``parent`` and with the same ``filter``.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    filter = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+    page_token = proto.Field(proto.STRING, number=4)
+
+
+class ListBackupOperationsResponse(proto.Message):
+    r"""The response for
+    [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+    Attributes:
+        operations (Sequence[~.gl_operations.Operation]):
+            The list of matching backup [long-running
+            operations][google.longrunning.Operation]. Each operation's
+            name will be prefixed by the backup's name and the
+            operation's
+            [metadata][google.longrunning.Operation.metadata] will be of
+            type
+            [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+            Operations returned include those that are pending or have
+            completed/failed/canceled within the last 7 days. Operations
+            returned are ordered by
+            ``operation.metadata.value.progress.start_time`` in
+            descending order starting from the most recently started
+            operation.
+        next_page_token (str):
+            ``next_page_token`` can be sent in a subsequent
+            [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
+            call to fetch more of the matching metadata.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    operations = proto.RepeatedField(
+        proto.MESSAGE, number=1, message=gl_operations.Operation,
+    )
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class BackupInfo(proto.Message):
+    r"""Information about a backup.
+
+    Attributes:
+        backup (str):
+            Name of the backup.
+        create_time (~.timestamp.Timestamp):
+            The backup contains an externally consistent copy of
+            ``source_database`` at the timestamp specified by
+            ``create_time``.
+        source_database (str):
+            Name of the database the backup was created
+            from.
+    """
+
+    backup = proto.Field(proto.STRING, number=1)
+
+    create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+    source_database = proto.Field(proto.STRING, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/common.py b/google/cloud/spanner_admin_database_v1/types/common.py
new file mode 100644
index 0000000000..ccd8de2819
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/types/common.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.spanner.admin.database.v1", manifest={"OperationProgress",},
+)
+
+
+class OperationProgress(proto.Message):
+    r"""Encapsulates progress related information for a Cloud Spanner
+    long running operation.
+
+    Attributes:
+        progress_percent (int):
+            Percent completion of the operation.
+            Values are between 0 and 100 inclusive.
+        start_time (~.timestamp.Timestamp):
+            Time the request was received.
+        end_time (~.timestamp.Timestamp):
+            If set, the time at which this operation
+            failed or was completed successfully.
+    """
+
+    progress_percent = proto.Field(proto.INT32, number=1)
+
+    start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+    end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
new file mode 100644
index 0000000000..b2b5939f5b
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
@@ -0,0 +1,562 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import common
+from google.longrunning import operations_pb2 as gl_operations  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.spanner.admin.database.v1",
+    manifest={
+        "RestoreSourceType",
+        "RestoreInfo",
+        "Database",
+        "ListDatabasesRequest",
+        "ListDatabasesResponse",
+        "CreateDatabaseRequest",
+        "CreateDatabaseMetadata",
+        "GetDatabaseRequest",
+        "UpdateDatabaseDdlRequest",
+        "UpdateDatabaseDdlMetadata",
+        "DropDatabaseRequest",
+        "GetDatabaseDdlRequest",
+        "GetDatabaseDdlResponse",
+        "ListDatabaseOperationsRequest",
+        "ListDatabaseOperationsResponse",
+        "RestoreDatabaseRequest",
+        "RestoreDatabaseMetadata",
+        "OptimizeRestoredDatabaseMetadata",
+    },
+)
+
+
+class RestoreSourceType(proto.Enum):
+    r"""Indicates the type of the restore source."""
+    TYPE_UNSPECIFIED = 0
+    BACKUP = 1
+
+
+class RestoreInfo(proto.Message):
+    r"""Information about the database restore.
+
+    Attributes:
+        source_type (~.spanner_database_admin.RestoreSourceType):
+            The type of the restore source.
+        backup_info (~.gsad_backup.BackupInfo):
+            Information about the backup used to restore
+            the database. The backup may no longer exist.
+    """
+
+    source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",)
+
+    backup_info = proto.Field(
+        proto.MESSAGE, number=2, oneof="source_info", message=gsad_backup.BackupInfo,
+    )
+
+
+class Database(proto.Message):
+    r"""A Cloud Spanner database.
+
+    Attributes:
+        name (str):
+            Required. The name of the database. Values are of the form
+            ``projects/<project>/instances/<instance>/databases/<database>``,
+            where ``<database>`` is as specified in the
+            ``CREATE DATABASE`` statement. This name can be passed to
+            other API methods to identify the database.
+        state (~.spanner_database_admin.Database.State):
+            Output only. The current database state.
+        create_time (~.timestamp.Timestamp):
+            Output only. If exists, the time at which the
+            database creation started.
+        restore_info (~.spanner_database_admin.RestoreInfo):
+            Output only. Applicable only for restored
+            databases. Contains information about the
+            restore source.
+    """
+
+    class State(proto.Enum):
+        r"""Indicates the current state of the database."""
+        STATE_UNSPECIFIED = 0
+        CREATING = 1
+        READY = 2
+        READY_OPTIMIZING = 3
+
+    name = proto.Field(proto.STRING, number=1)
+
+    state = proto.Field(proto.ENUM, number=2, enum=State,)
+
+    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+    restore_info = proto.Field(proto.MESSAGE, number=4, message="RestoreInfo",)
+
+
+class ListDatabasesRequest(proto.Message):
+    r"""The request for
+    [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+
+    Attributes:
+        parent (str):
+            Required. The instance whose databases should be listed.
+            Values are of the form
+            ``projects/<project>/instances/<instance>``.
+        page_size (int):
+            Number of databases to be returned in the
+            response.
+            If 0 or less, defaults to the server's
+            maximum allowed page size.
+        page_token (str):
+            If non-empty, ``page_token`` should contain a
+            [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
+            from a previous
+            [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+    page_token = proto.Field(proto.STRING, number=4)
+
+
+class ListDatabasesResponse(proto.Message):
+    r"""The response for
+    [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+
+    Attributes:
+        databases (Sequence[~.spanner_database_admin.Database]):
+            Databases that matched the request.
+        next_page_token (str):
+            ``next_page_token`` can be sent in a subsequent
+            [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
+            call to fetch more of the matching databases.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    databases = proto.RepeatedField(proto.MESSAGE, number=1, message="Database",)
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class CreateDatabaseRequest(proto.Message):
+    r"""The request for
+    [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+
+    Attributes:
+        parent (str):
+            Required. The name of the instance that will serve the new
+            database. Values are of the form
+            ``projects/<project>/instances/<instance>``.
+        create_statement (str):
+            Required. A ``CREATE DATABASE`` statement, which specifies
+            the ID of the new database. The database ID must conform to
+            the regular expression ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be
+            between 2 and 30 characters in length. If the database ID is
+            a reserved word or if it contains a hyphen, the database ID
+            must be enclosed in backticks (:literal:`\``).
+        extra_statements (Sequence[str]):
+            Optional. A list of DDL statements to run
+            inside the newly created database. Statements
+            can create tables, indexes, etc. These
+            statements execute atomically with the creation
+            of the database: if there is an error in any
+            statement, the database is not created.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    create_statement = proto.Field(proto.STRING, number=2)
+
+    extra_statements = proto.RepeatedField(proto.STRING, number=3)
+
+
+class CreateDatabaseMetadata(proto.Message):
+    r"""Metadata type for the operation returned by
+    [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+
+    Attributes:
+        database (str):
+            The database being created.
+    """
+
+    database = proto.Field(proto.STRING, number=1)
+
+
+class GetDatabaseRequest(proto.Message):
+    r"""The request for
+    [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+
+    Attributes:
+        name (str):
+            Required. The name of the requested database. Values are of
+            the form
+            ``projects/<project>/instances/<instance>/databases/<database>``.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class UpdateDatabaseDdlRequest(proto.Message):
+    r"""Enqueues the given DDL statements to be applied, in order but not
+    necessarily all at once, to the database schema at some point (or
+    points) in the future. The server checks that the statements are
+    executable (syntactically valid, name tables that exist, etc.)
+    before enqueueing them, but they may still fail upon later execution
+    (e.g., if a statement from another batch of statements is applied
+    first and it conflicts in some way, or if there is some data-related
+    problem like a ``NULL`` value in a column to which ``NOT NULL``
+    would be added). If a statement fails, all subsequent statements in
+    the batch are automatically cancelled.
+
+    Each batch of statements is assigned a name which can be used with
+    the [Operations][google.longrunning.Operations] API to monitor
+    progress. See the
+    [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+    field for more details.
+
+    Attributes:
+        database (str):
+            Required. The database to update.
+        statements (Sequence[str]):
+            Required. DDL statements to be applied to the
+            database.
+        operation_id (str):
+            If empty, the new update request is assigned an
+            automatically-generated operation ID. Otherwise,
+            ``operation_id`` is used to construct the name of the
+            resulting [Operation][google.longrunning.Operation].
+
+            Specifying an explicit operation ID simplifies determining
+            whether the statements were executed in the event that the
+            [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+            call is replayed, or the return value is otherwise lost: the
+            [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
+            and ``operation_id`` fields can be combined to form the
+            [name][google.longrunning.Operation.name] of the resulting
+            [longrunning.Operation][google.longrunning.Operation]:
+            ``<database>/operations/<operation_id>``.
+
+            ``operation_id`` should be unique within the database, and
+            must be a valid identifier: ``[a-z][a-z0-9_]*``. Note that
+            automatically-generated operation IDs always begin with an
+            underscore. If the named operation already exists,
+            [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+            returns ``ALREADY_EXISTS``.
+    """
+
+    database = proto.Field(proto.STRING, number=1)
+
+    statements = proto.RepeatedField(proto.STRING, number=2)
+
+    operation_id = proto.Field(proto.STRING, number=3)
+
+
+class UpdateDatabaseDdlMetadata(proto.Message):
+    r"""Metadata type for the operation returned by
+    [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+
+    Attributes:
+        database (str):
+            The database being modified.
+        statements (Sequence[str]):
+            For an update this list contains all the
+            statements. For an individual statement, this
+            list contains only that statement.
+        commit_timestamps (Sequence[~.timestamp.Timestamp]):
+            Reports the commit timestamps of all statements that have
+            succeeded so far, where ``commit_timestamps[i]`` is the
+            commit timestamp for the statement ``statements[i]``.
+    """
+
+    database = proto.Field(proto.STRING, number=1)
+
+    statements = proto.RepeatedField(proto.STRING, number=2)
+
+    commit_timestamps = proto.RepeatedField(
+        proto.MESSAGE, number=3, message=timestamp.Timestamp,
+    )
+
+
+class DropDatabaseRequest(proto.Message):
+    r"""The request for
+    [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+
+    Attributes:
+        database (str):
+            Required. The database to be dropped.
+    """
+
+    database = proto.Field(proto.STRING, number=1)
+
+
+class GetDatabaseDdlRequest(proto.Message):
+    r"""The request for
+    [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+
+    Attributes:
+        database (str):
+            Required. The database whose schema we wish
+            to get.
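+
+    Example:
+        An illustrative sketch (editorial, not generated documentation;
+        the database name is a placeholder and ``client`` is assumed to
+        be a configured ``DatabaseAdminClient``)::
+
+            request = GetDatabaseDdlRequest(
+                database="projects/my-project/instances/my-instance/databases/my-db",
+            )
+            # The response carries the formatted DDL statements that
+            # define the database schema.
+            response = client.get_database_ddl(request=request)
+            for statement in response.statements:
+                print(statement)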
+ """ + + database = proto.Field(proto.STRING, number=1) + + +class GetDatabaseDdlResponse(proto.Message): + r"""The response for + [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. + + Attributes: + statements (Sequence[str]): + A list of formatted DDL statements defining + the schema of the database specified in the + request. + """ + + statements = proto.RepeatedField(proto.STRING, number=1) + + +class ListDatabaseOperationsRequest(proto.Message): + r"""The request for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + + Attributes: + parent (str): + Required. The instance of the database operations. Values + are of the form ``projects//instances/``. + filter (str): + An expression that filters the list of returned operations. + + A filter expression consists of a field name, a comparison + operator, and a value for filtering. The value must be a + string, a number, or a boolean. The comparison operator must + be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or + ``:``. Colon ``:`` is the contains operator. Filter rules + are not case sensitive. + + The following fields in the + [Operation][google.longrunning.Operation] are eligible for + filtering: + + - ``name`` - The name of the long-running operation + - ``done`` - False if the operation is in progress, else + true. + - ``metadata.@type`` - the type of metadata. For example, + the type string for + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + is + ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``. + - ``metadata.`` - any field in metadata.value. + - ``error`` - Error associated with the long-running + operation. + - ``response.@type`` - the type of response. + - ``response.`` - any field in response.value. + + You can combine multiple expressions by enclosing each + expression in parentheses. By default, expressions are + combined with AND logic. However, you can specify AND, OR, + and NOT logic explicitly. + + Here are a few examples: + + - ``done:true`` - The operation is complete. + - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND`` + ``(metadata.source_type:BACKUP) AND`` + ``(metadata.backup_info.backup:backup_howl) AND`` + ``(metadata.name:restored_howl) AND`` + ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`` + ``(error:*)`` - Return operations where: + + - The operation's metadata type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + - The database is restored from a backup. + - The backup name contains "backup_howl". + - The restored database's name contains "restored_howl". + - The operation started before 2018-03-28T14:50:00Z. + - The operation resulted in an error. + page_size (int): + Number of operations to be returned in the + response. If 0 or less, defaults to the server's + maximum allowed page size. + page_token (str): + If non-empty, ``page_token`` should contain a + [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] + from a previous + [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] + to the same ``parent`` and with the same ``filter``. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=4) + + +class ListDatabaseOperationsResponse(proto.Message): + r"""The response for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + + Attributes: + operations (Sequence[~.gl_operations.Operation]): + The list of matching database [long-running + operations][google.longrunning.Operation]. Each operation's + name will be prefixed by the database's name. The + operation's + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + next_page_token (str): + ``next_page_token`` can be sent in a subsequent + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations] + call to fetch more of the matching metadata. + """ + + @property + def raw_page(self): + return self + + operations = proto.RepeatedField( + proto.MESSAGE, number=1, message=gl_operations.Operation, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class RestoreDatabaseRequest(proto.Message): + r"""The request for + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. + + Attributes: + parent (str): + Required. The name of the instance in which to create the + restored database. This instance must be in the same project + and have the same instance configuration as the instance + containing the source backup. Values are of the form + ``projects//instances/``. + database_id (str): + Required. The id of the database to create and restore to. + This database must not already exist. The ``database_id`` + appended to ``parent`` forms the full database name of the + form + ``projects//instances//databases/``. + backup (str): + Name of the backup from which to restore. Values are of the + form + ``projects//instances//backups/``. + """ + + parent = proto.Field(proto.STRING, number=1) + + database_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.STRING, number=3, oneof="source") + + +class RestoreDatabaseMetadata(proto.Message): + r"""Metadata type for the long-running operation returned by + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. + + Attributes: + name (str): + Name of the database being created and + restored to. + source_type (~.spanner_database_admin.RestoreSourceType): + The type of the restore source. + backup_info (~.gsad_backup.BackupInfo): + Information about the backup used to restore + the database. + progress (~.common.OperationProgress): + The progress of the + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase] + operation. + cancel_time (~.timestamp.Timestamp): + The time at which cancellation of this operation was + received. + [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + starts asynchronous cancellation on a long-running + operation. The server makes a best effort to cancel the + operation, but success is not guaranteed. Clients can use + [Operations.GetOperation][google.longrunning.Operations.GetOperation] + or other methods to check whether the cancellation succeeded + or whether the operation completed despite cancellation. 
+            On successful cancellation, the operation is not deleted;
+            instead, it becomes an operation with an
+            [Operation.error][google.longrunning.Operation.error] value
+            with a [google.rpc.Status.code][google.rpc.Status.code] of
+            1, corresponding to ``Code.CANCELLED``.
+        optimize_database_operation_name (str):
+            If exists, the name of the long-running operation that will
+            be used to track the post-restore optimization process to
+            optimize the performance of the restored database, and
+            remove the dependency on the restore source. The name is of
+            the form
+            ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation>``
+            where the ``<database>`` is the name of database being
+            created and restored to. The metadata type of the
+            long-running operation is
+            [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
+            This long-running operation will be automatically created by
+            the system after the RestoreDatabase long-running operation
+            completes successfully. This operation will not be created
+            if the restore was not successful.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    source_type = proto.Field(proto.ENUM, number=2, enum="RestoreSourceType",)
+
+    backup_info = proto.Field(
+        proto.MESSAGE, number=3, oneof="source_info", message=gsad_backup.BackupInfo,
+    )
+
+    progress = proto.Field(proto.MESSAGE, number=4, message=common.OperationProgress,)
+
+    cancel_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
+
+    optimize_database_operation_name = proto.Field(proto.STRING, number=6)
+
+
+class OptimizeRestoredDatabaseMetadata(proto.Message):
+    r"""Metadata type for the long-running operation used to track
+    the progress of optimizations performed on a newly restored
+    database. This long-running operation is automatically created
+    by the system after the successful completion of a database
+    restore, and cannot be cancelled.
+
+    Attributes:
+        name (str):
+            Name of the restored database being
+            optimized.
+        progress (~.common.OperationProgress):
+            The progress of the post-restore
+            optimizations.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py
index 53f32d3b47..47ef07bd53 100644
--- a/google/cloud/spanner_admin_instance_v1/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/__init__.py
@@ -1,29 +1,51 @@
 # -*- coding: utf-8 -*-
-#
-# Copyright 2018 Google LLC
+
+# Copyright 2020 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# https://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+#
 
-from __future__ import absolute_import
-
-from google.cloud.spanner_admin_instance_v1 import types
-from google.cloud.spanner_admin_instance_v1.gapic import enums
-from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client
-
-
-class InstanceAdminClient(instance_admin_client.InstanceAdminClient):
-    __doc__ = instance_admin_client.InstanceAdminClient.__doc__
-    enums = enums
+from .services.instance_admin import InstanceAdminClient
+from .types.spanner_instance_admin import CreateInstanceMetadata
+from .types.spanner_instance_admin import CreateInstanceRequest
+from .types.spanner_instance_admin import DeleteInstanceRequest
+from .types.spanner_instance_admin import GetInstanceConfigRequest
+from .types.spanner_instance_admin import GetInstanceRequest
+from .types.spanner_instance_admin import Instance
+from .types.spanner_instance_admin import InstanceConfig
+from .types.spanner_instance_admin import ListInstanceConfigsRequest
+from .types.spanner_instance_admin import ListInstanceConfigsResponse
+from .types.spanner_instance_admin import ListInstancesRequest
+from .types.spanner_instance_admin import ListInstancesResponse
+from .types.spanner_instance_admin import ReplicaInfo
+from .types.spanner_instance_admin import UpdateInstanceMetadata
+from .types.spanner_instance_admin import UpdateInstanceRequest
 
-__all__ = ("enums", "types", "InstanceAdminClient")
+__all__ = (
+    "CreateInstanceMetadata",
+    "CreateInstanceRequest",
+    "DeleteInstanceRequest",
+    "GetInstanceConfigRequest",
+    "GetInstanceRequest",
+    "Instance",
+    "InstanceConfig",
+    "ListInstanceConfigsRequest",
+    "ListInstanceConfigsResponse",
+    "ListInstancesRequest",
+    "ListInstancesResponse",
+    "ReplicaInfo",
+    "UpdateInstanceMetadata",
+    "UpdateInstanceRequest",
+    "InstanceAdminClient",
+)
diff --git a/google/cloud/spanner_admin_instance_v1/gapic/__init__.py b/google/cloud/spanner_admin_instance_v1/gapic/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/google/cloud/spanner_admin_instance_v1/gapic/enums.py b/google/cloud/spanner_admin_instance_v1/gapic/enums.py
deleted file mode 100644
index e93cf829d0..0000000000
--- a/google/cloud/spanner_admin_instance_v1/gapic/enums.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Wrappers for protocol buffer enum types."""
-
-import enum
-
-
-class Instance(object):
-    class State(enum.IntEnum):
-        """
-        Indicates the current state of the instance.
-
-        Attributes:
-          STATE_UNSPECIFIED (int): Not specified.
-          CREATING (int): The instance is still being created. Resources may not be
-          available yet, and operations such as database creation may not
-          work.
-          READY (int): The instance is fully created and ready to do work such as
-          creating databases.
-        """
-
-        STATE_UNSPECIFIED = 0
-        CREATING = 1
-        READY = 2
-
-
-class ReplicaInfo(object):
-    class ReplicaType(enum.IntEnum):
-        """
-        Indicates the type of replica.
-        See the `replica types documentation
-        <https://cloud.google.com/spanner/docs/replication#replica_types>`__
-        for more details.
-
-        Attributes:
-          TYPE_UNSPECIFIED (int): Not specified.
-          READ_WRITE (int): Read-write replicas support both reads and writes. These replicas:
-
-          -  Maintain a full copy of your data.
-          -  Serve reads.
-          -  Can vote whether to commit a write.
-          -  Participate in leadership election.
-          -  Are eligible to become a leader.
-          READ_ONLY (int): Read-only replicas only support reads (not writes). Read-only
-          replicas:
-
-          -  Maintain a full copy of your data.
-          -  Serve reads.
-          -  Do not participate in voting to commit writes.
-          -  Are not eligible to become a leader.
-          WITNESS (int): Witness replicas don't support reads but do participate in voting to
-          commit writes. Witness replicas:
-
-          -  Do not maintain a full copy of data.
-          -  Do not serve reads.
-          -  Vote whether to commit writes.
-          -  Participate in leader election but are not eligible to become leader.
-        """
-
-        TYPE_UNSPECIFIED = 0
-        READ_WRITE = 1
-        READ_ONLY = 2
-        WITNESS = 3
diff --git a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py b/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py
deleted file mode 100644
index bc6934a711..0000000000
--- a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py
+++ /dev/null
@@ -1,1223 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Accesses the google.spanner.admin.instance.v1 InstanceAdmin API."""
-
-import functools
-import pkg_resources
-import warnings
-
-from google.oauth2 import service_account
-import google.api_core.client_options
-import google.api_core.gapic_v1.client_info
-import google.api_core.gapic_v1.config
-import google.api_core.gapic_v1.method
-import google.api_core.gapic_v1.routing_header
-import google.api_core.grpc_helpers
-import google.api_core.operation
-import google.api_core.operations_v1
-import google.api_core.page_iterator
-import google.api_core.path_template
-import grpc
-
-from google.cloud.spanner_admin_instance_v1.gapic import enums
-from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client_config
-from google.cloud.spanner_admin_instance_v1.gapic.transports import (
-    instance_admin_grpc_transport,
-)
-from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2
-from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2_grpc
-from google.iam.v1 import iam_policy_pb2
-from google.iam.v1 import options_pb2
-from google.iam.v1 import policy_pb2
-from google.longrunning import operations_pb2
-from google.protobuf import empty_pb2
-from google.protobuf import field_mask_pb2
-
-
-_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version
-
-
-class InstanceAdminClient(object):
-    """
-    Cloud Spanner Instance Admin API
-
-    The Cloud Spanner Instance Admin API can be used to create, delete,
-    modify and list instances.
-    Instances are dedicated Cloud Spanner serving
-    and storage resources to be used by Cloud Spanner databases.
-
-    Each instance has a "configuration", which dictates where the
-    serving resources for the Cloud Spanner instance are located (e.g.,
-    US-central, Europe). Configurations are created by Google based on
-    resource availability.
-
-    Cloud Spanner billing is based on the instances that exist and their
-    sizes. After an instance exists, there are no additional
-    per-database or per-operation charges for use of the instance
-    (though there may be additional network bandwidth charges).
-    Instances offer isolation: problems with databases in one instance
-    will not affect other instances. However, within an instance
-    databases can affect each other. For example, if one database in an
-    instance receives a lot of requests and consumes most of the
-    instance resources, fewer resources are available for other
-    databases in that instance, and their performance may suffer.
-    """
-
-    SERVICE_ADDRESS = "spanner.googleapis.com:443"
-    """The default address of the service."""
-
-    # The name of the interface for this client. This is the key used to
-    # find the method configuration in the client_config dictionary.
-    _INTERFACE_NAME = "google.spanner.admin.instance.v1.InstanceAdmin"
-
-    @classmethod
-    def from_service_account_file(cls, filename, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            InstanceAdminClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @classmethod
-    def instance_path(cls, project, instance):
-        """Return a fully-qualified instance string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/instances/{instance}",
-            project=project,
-            instance=instance,
-        )
-
-    @classmethod
-    def instance_config_path(cls, project, instance_config):
-        """Return a fully-qualified instance_config string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}/instanceConfigs/{instance_config}",
-            project=project,
-            instance_config=instance_config,
-        )
-
-    @classmethod
-    def project_path(cls, project):
-        """Return a fully-qualified project string."""
-        return google.api_core.path_template.expand(
-            "projects/{project}", project=project
-        )
-
-    def __init__(
-        self,
-        transport=None,
-        channel=None,
-        credentials=None,
-        client_config=None,
-        client_info=None,
-        client_options=None,
-    ):
-        """Constructor.
-
-        Args:
-            transport (Union[~.InstanceAdminGrpcTransport,
-                    Callable[[~.Credentials, type], ~.InstanceAdminGrpcTransport]): A transport
-                instance, responsible for actually making the API calls.
-                The default transport uses the gRPC protocol.
-                This argument may also be a callable which returns a
-                transport instance. Callables will be sent the credentials
-                as the first argument and the default transport class as
-                the second argument.
-            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
-                through which to make calls. This argument is mutually exclusive
-                with ``credentials``; providing both will raise an exception.
-            credentials (google.auth.credentials.Credentials): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is mutually exclusive with providing a
-                transport instance to ``transport``; doing so will raise
-                an exception.
-            client_config (dict): DEPRECATED. A dictionary of call options for
-                each method. If not specified, the default configuration is used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            client_options (Union[dict, google.api_core.client_options.ClientOptions]):
-                Client options used to set user options on the client. API Endpoint
-                should be set through client_options.
-        """
-        # Raise deprecation warnings for things we want to go away.
-        if client_config is not None:
-            warnings.warn(
-                "The `client_config` argument is deprecated.",
-                PendingDeprecationWarning,
-                stacklevel=2,
-            )
-        else:
-            client_config = instance_admin_client_config.config
-
-        if channel:
-            warnings.warn(
-                "The `channel` argument is deprecated; use " "`transport` instead.",
-                PendingDeprecationWarning,
-                stacklevel=2,
-            )
-
-        api_endpoint = self.SERVICE_ADDRESS
-        if client_options:
-            if type(client_options) == dict:
-                client_options = google.api_core.client_options.from_dict(
-                    client_options
-                )
-            if client_options.api_endpoint:
-                api_endpoint = client_options.api_endpoint
-
-        # Instantiate the transport.
-        # The transport is responsible for handling serialization and
-        # deserialization and actually sending data to the service.
-        if transport:
-            if callable(transport):
-                self.transport = transport(
-                    credentials=credentials,
-                    default_class=instance_admin_grpc_transport.InstanceAdminGrpcTransport,
-                    address=api_endpoint,
-                )
-            else:
-                if credentials:
-                    raise ValueError(
-                        "Received both a transport instance and "
-                        "credentials; these are mutually exclusive."
-                    )
-                self.transport = transport
-        else:
-            self.transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport(
-                address=api_endpoint, channel=channel, credentials=credentials
-            )
-
-        if client_info is None:
-            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
-                gapic_version=_GAPIC_LIBRARY_VERSION
-            )
-        else:
-            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
-        self._client_info = client_info
-
-        # Parse out the default settings for retry and timeout for each RPC
-        # from the client configuration.
-        # (Ordinarily, these are the defaults specified in the `*_config.py`
-        # file next to this one.)
-        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
-            client_config["interfaces"][self._INTERFACE_NAME]
-        )
-
-        # Save a dictionary of cached API call functions.
-        # These are the actual callables which invoke the proper
-        # transport methods, wrapped with `wrap_method` to add retry,
-        # timeout, and the like.
-        self._inner_api_calls = {}
-
-    # Service calls
-    def create_instance(
-        self,
-        parent,
-        instance_id,
-        instance,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Creates an instance and begins preparing it to begin serving.
-        The returned ``long-running operation`` can be used to track the
-        progress of preparing the new instance. The instance name is assigned
-        by the caller. If the named instance already exists, ``CreateInstance``
-        returns ``ALREADY_EXISTS``.
-
-        Immediately upon completion of this request:
-
-        -  The instance is readable via the API, with all requested attributes
-           but no allocated resources. Its state is ``CREATING``.
-
-        Until completion of the returned operation:
-
-        -  Cancelling the operation renders the instance immediately unreadable
-           via the API.
-        -  The instance can be deleted.
-        -  All other attempts to modify the instance are rejected.
-
-        Upon completion of the returned operation:
-
-        -  Billing for all successfully-allocated resources begins (some types
-           may have lower than the requested levels).
-        -  Databases can be created in the instance.
-        -  The instance's allocated resource levels are readable via the API.
-        -  The instance's state becomes ``READY``.
-
-        The returned ``long-running operation`` will have a name of the format
-        ``<instance_name>/operations/<operation_id>`` and can be used to track
-        creation of the instance. The ``metadata`` field type is
-        ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``,
-        if successful.
-
-        Example:
-            >>> from google.cloud import spanner_admin_instance_v1
-            >>>
-            >>> client = spanner_admin_instance_v1.InstanceAdminClient()
-            >>>
-            >>> parent = client.project_path('[PROJECT]')
-            >>>
-            >>> # TODO: Initialize `instance_id`:
-            >>> instance_id = ''
-            >>>
-            >>> # TODO: Initialize `instance`:
-            >>> instance = {}
-            >>>
-            >>> response = client.create_instance(parent, instance_id, instance)
-            >>>
-            >>> def callback(operation_future):
-            ...     # Handle result.
-            ...     result = operation_future.result()
-            >>>
-            >>> response.add_done_callback(callback)
-            >>>
-            >>> # Handle metadata.
-            >>> metadata = response.metadata()
-
-        Args:
-            parent (str): Required. The name of the project in which to create the instance.
-                Values are of the form ``projects/<project>``.
-            instance_id (str): Required. The ID of the instance to create. Valid identifiers are of
-                the form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 2 and 64
-                characters in length.
-            instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to create. The name may be omitted, but if
-                specified must be ``<parent>/instances/<instance_id>``.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.api_core.operation.Operation` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
- if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_instance_admin_pb2.Instance, - metadata_type=spanner_instance_admin_pb2.CreateInstanceMetadata, - ) - - def update_instance( - self, - instance, - field_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance, and begins allocating or releasing resources as - requested. The returned ``long-running operation`` can be used to track - the progress of updating the instance. If the named instance does not - exist, returns ``NOT_FOUND``. - - Immediately upon completion of this request: - - - For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - - Cancelling the operation sets its metadata's ``cancel_time``, and - begins restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, after which - point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance are rejected. - - Reading the instance via the API continues to give the pre-request - resource levels. - - Upon completion of the returned operation: - - - Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - - All newly-reserved resources are available for serving the instance's - tables. - - The instance's new resource levels are readable via the API. - - The returned ``long-running operation`` will have a name of the format - ``<instance_name>/operations/<operation_id>`` and can be used to track - the instance modification. The ``metadata`` field type is - ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Authorization requires ``spanner.instances.update`` permission on - resource ``name``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `field_mask`: - >>> field_mask = {} - >>> - >>> response = client.update_instance(instance, field_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required.
The instance to update, which must always include the - instance name. Otherwise, only fields mentioned in ``field_mask`` need - be included. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in ``Instance`` should be - updated. The field mask must always be specified; this prevents any - future fields in ``Instance`` from being erased accidentally by clients - that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.operation.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.UpdateInstanceRequest( - instance=instance, field_mask=field_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_instance_admin_pb2.Instance, - metadata_type=spanner_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def list_instance_configs( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the supported instance configurations for a given project. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_instance_configs(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_instance_configs(parent).pages: - ... for element in page: - ... # process element - ... 
pass - - Args: - parent (str): Required. The name of the project for which a list of supported - instance configurations is requested. Values are of the form - ``projects/<project>``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_instance_configs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instance_configs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instance_configs, - default_retry=self._method_configs["ListInstanceConfigs"].retry, - default_timeout=self._method_configs["ListInstanceConfigs"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_instance_configs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="instance_configs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_instance_config( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a particular instance configuration. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_config_path('[PROJECT]', '[INSTANCE_CONFIG]') - >>> - >>> response = client.get_instance_config(name) - - Args: - name (str): Required. The name of the requested instance configuration. Values - are of the form ``projects/<project>/instanceConfigs/<configuration>``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration.
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance_config" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance_config, - default_retry=self._method_configs["GetInstanceConfig"].retry, - default_timeout=self._method_configs["GetInstanceConfig"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.GetInstanceConfigRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_size=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all instances in the given project. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_instances(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_instances(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The name of the project for which a list of instances is - requested. Values are of the form ``projects/<project>``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - filter_ (str): An expression for filtering the results of the request. Filter rules - are case insensitive. The fields eligible for filtering are: - - - ``name`` - - ``display_name`` - - ``labels.key`` where key is the name of a label - - Some examples of using filters are: - - - ``name:*`` --> The instance has a name. - - ``name:Howl`` --> The instance's name contains the string "howl". - - ``name:HOWL`` --> Equivalent to above. - - ``NAME:howl`` --> Equivalent to above. - - ``labels.env:*`` --> The instance has the label "env". - - ``labels.env:dev`` --> The instance has the label "env" and the value - of the label contains the string "dev".
- - ``name:howl labels.env:dev`` --> The instance's name contains "howl" - and it has the label "env" with its value containing "dev". - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_size=page_size, filter=filter_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_instances"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="instances", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_instance( - self, - name, - field_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a particular instance. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): Required. The name of the requested instance. Values are of the form - ``projects/<project>/instances/<instance>``. - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): If field_mask is present, specifies the subset of ``Instance`` - fields that should be returned. If absent, all ``Instance`` fields are - returned. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete.
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.GetInstanceRequest( - name=name, field_mask=field_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an instance. - - Immediately upon completion of the request: - - - Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - - The instance and *all of its databases* immediately and irrevocably - disappear from the API. All data in the databases is permanently - deleted. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): Required. The name of the instance to be deleted. Values are of the - form ``projects/<project>/instances/<instance>`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic.
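# Under the v2 microgenerated client, the equivalent call is roughly the
# following sketch (the instance name is a placeholder):
#
#     from google.cloud import spanner_admin_instance_v1
#
#     client = spanner_admin_instance_v1.InstanceAdminClient()
#     client.delete_instance(name="projects/my-project/instances/my-instance")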
- if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.DeleteInstanceRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Authorization requires ``spanner.instances.setIamPolicy`` on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
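# A rough v2 equivalent of set_iam_policy, assuming the microgenerated
# surface that takes a request message (the resource name, role, and member
# below are placeholders):
#
#     from google.iam.v1 import iam_policy_pb2, policy_pb2
#
#     policy = policy_pb2.Policy(
#         bindings=[
#             policy_pb2.Binding(
#                 role="roles/spanner.admin",
#                 members=["user:alice@example.com"],
#             )
#         ]
#     )
#     response = client.set_iam_policy(
#         request=iam_policy_pb2.SetIamPolicyRequest(
#             resource="projects/my-project/instances/my-instance",
#             policy=policy,
#         )
#     )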
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an - empty policy if an instance exists but does not have a policy set. - - Authorization requires ``spanner.instances.getIamPolicy`` on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
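# Likewise, a rough v2 sketch of get_iam_policy (the resource name is a
# placeholder):
#
#     from google.iam.v1 import iam_policy_pb2
#
#     policy = client.get_iam_policy(
#         request=iam_policy_pb2.GetIamPolicyRequest(
#             resource="projects/my-project/instances/my-instance"
#         )
#     )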
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance - resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource - will result in a NOT_FOUND error if the user has - ``spanner.instances.list`` permission on the containing Google Cloud - Project. Otherwise returns an empty set of permissions. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview <https://cloud.google.com/iam/docs/overview#permissions>`__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic.
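# And a rough v2 sketch of test_iam_permissions (the permission names shown
# are examples):
#
#     from google.iam.v1 import iam_policy_pb2
#
#     response = client.test_iam_permissions(
#         request=iam_policy_pb2.TestIamPermissionsRequest(
#             resource="projects/my-project/instances/my-instance",
#             permissions=["spanner.instances.get", "spanner.instances.update"],
#         )
#     )
#     granted = list(response.permissions)  # subset the caller actually holds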
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py b/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py deleted file mode 100644 index cb18900f9e..0000000000 --- a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py +++ /dev/null @@ -1,112 +0,0 @@ -config = { - "interfaces": { - "google.spanner.admin.instance.v1.InstanceAdmin": { - "retry_codes": { - "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_2_codes": [], - "no_retry_codes": [], - "retry_policy_2_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 30000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "no_retry_2_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 30000, - }, - }, - "methods": { - "CreateInstance": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "UpdateInstance": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "ListInstanceConfigs": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetInstanceConfig": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - 
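# For reference, the "retry_policy_1" settings above map roughly onto a
# google.api_core retry object like the following sketch (the table stores
# delays in milliseconds; Retry takes seconds):
#
#     from google.api_core import exceptions, retry
#
#     retry_policy_1 = retry.Retry(
#         predicate=retry.if_exception_type(
#             exceptions.ServiceUnavailable,  # UNAVAILABLE
#             exceptions.DeadlineExceeded,    # DEADLINE_EXCEEDED
#         ),
#         initial=1.0,      # initial_retry_delay_millis
#         multiplier=1.3,   # retry_delay_multiplier
#         maximum=32.0,     # max_retry_delay_millis
#         deadline=3600.0,  # total_timeout_millis
#     )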
"ListInstances": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetInstance": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteInstance": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "SetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - "GetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "TestIamPermissions": { - "timeout_millis": 30000, - "retry_codes_name": "no_retry_2_codes", - "retry_params_name": "no_retry_2_params", - }, - }, - } - } -} diff --git a/google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py b/google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py b/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py deleted file mode 100644 index c823c59bbb..0000000000 --- a/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2_grpc - - -class InstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.admin.instance.v1 InstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.admin", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). 
- if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "instance_admin_stub": spanner_instance_admin_pb2_grpc.InstanceAdminStub( - channel - ) - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.create_instance`. - - Creates an instance and begins preparing it to begin serving. The - returned ``long-running operation`` can be used to track the progress of - preparing the new instance. The instance name is assigned by the caller. - If the named instance already exists, ``CreateInstance`` returns - ``ALREADY_EXISTS``. - - Immediately upon completion of this request: - - - The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is ``CREATING``. - - Until completion of the returned operation: - - - Cancelling the operation renders the instance immediately unreadable - via the API. - - The instance can be deleted. - - All other attempts to modify the instance are rejected. - - Upon completion of the returned operation: - - - Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - - Databases can be created in the instance. - - The instance's allocated resource levels are readable via the API. - - The instance's state becomes ``READY``. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - creation of the instance. The ``metadata`` field type is - ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].CreateInstance - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.update_instance`. 
- - Updates an instance, and begins allocating or releasing resources as - requested. The returned ``long-running operation`` can be used to track - the progress of updating the instance. If the named instance does not - exist, returns ``NOT_FOUND``. - - Immediately upon completion of this request: - - - For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - - Cancelling the operation sets its metadata's ``cancel_time``, and - begins restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, after which - point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance are rejected. - - Reading the instance via the API continues to give the pre-request - resource levels. - - Upon completion of the returned operation: - - - Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - - All newly-reserved resources are available for serving the instance's - tables. - - The instance's new resource levels are readable via the API. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - the instance modification. The ``metadata`` field type is - ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Authorization requires ``spanner.instances.update`` permission on - resource ``name``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].UpdateInstance - - @property - def list_instance_configs(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.list_instance_configs`. - - Lists the supported instance configurations for a given project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].ListInstanceConfigs - - @property - def get_instance_config(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_instance_config`. - - Gets information about a particular instance configuration. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetInstanceConfig - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.list_instances`. - - Lists all instances in the given project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].ListInstances - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_instance`. - - Gets information about a particular instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.delete_instance`. - - Deletes an instance. 
- - Immediately upon completion of the request: - - - Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - - The instance and *all of its databases* immediately and irrevocably - disappear from the API. All data in the databases is permanently - deleted. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].DeleteInstance - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Authorization requires ``spanner.instances.setIamPolicy`` on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].SetIamPolicy - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an - empty policy if an instance exists but does not have a policy set. - - Authorization requires ``spanner.instances.getIamPolicy`` on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance - resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource - will result in a NOT_FOUND error if the user has - ``spanner.instances.list`` permission on the containing Google Cloud - Project. Otherwise returns an empty set of permissions. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].TestIamPermissions diff --git a/google/cloud/spanner_admin_instance_v1/proto/__init__.py b/google/cloud/spanner_admin_instance_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py deleted file mode 100644 index 8d086520e5..0000000000 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py +++ /dev/null @@ -1,1896 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
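# In the v2 library these generated descriptors are replaced by proto-plus
# message classes; a rough sketch of the replacement usage (field values are
# placeholders):
#
#     from google.cloud.spanner_admin_instance_v1 import types
#
#     instance = types.Instance(
#         name="projects/my-project/instances/my-instance",
#         display_name="My Instance",
#         node_count=1,
#     )
#     instance_pb = types.Instance.pb(instance)  # underlying raw protobuf message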
-# source: google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto", - package="google.spanner.admin.instance.v1", - syntax="proto3", - serialized_options=b"\n$com.google.spanner.admin.instance.v1B\031SpannerInstanceAdminProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance\252\002&Google.Cloud.Spanner.Admin.Instance.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Instance\\V1\352\002+Google::Cloud::Spanner::Admin::Instance::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\nIgoogle/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto\x12 google.spanner.admin.instance.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xda\x01\n\x0bReplicaInfo\x12\x10\n\x08location\x18\x01 \x01(\t\x12G\n\x04type\x18\x02 \x01(\x0e\x32\x39.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType\x12\x1f\n\x17\x64\x65\x66\x61ult_leader_location\x18\x03 \x01(\x08"O\n\x0bReplicaType\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nREAD_WRITE\x10\x01\x12\r\n\tREAD_ONLY\x10\x02\x12\x0b\n\x07WITNESS\x10\x03"\xd7\x01\n\x0eInstanceConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12?\n\x08replicas\x18\x03 \x03(\x0b\x32-.google.spanner.admin.instance.v1.ReplicaInfo:`\xea\x41]\n%spanner.googleapis.com/InstanceConfig\x12\x34projects/{project}/instanceConfigs/{instance_config}"\xd5\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x06\x63onfig\x18\x02 \x01(\tB*\xfa\x41\'\n%spanner.googleapis.com/InstanceConfig\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x12\n\nnode_count\x18\x05 \x01(\x05\x12?\n\x05state\x18\x06 \x01(\x0e\x32\x30.google.spanner.admin.instance.v1.Instance.State\x12\x46\n\x06labels\x18\x07 \x03(\x0b\x32\x36.google.spanner.admin.instance.v1.Instance.LabelsEntry\x12\x15\n\rendpoint_uris\x18\x08 \x03(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:M\xea\x41J\n\x1fspanner.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\x88\x01\n\x1aListInstanceConfigsRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"\x82\x01\n\x1bListInstanceConfigsResponse\x12J\n\x10instance_configs\x18\x01 \x03(\x0b\x32\x30.google.spanner.admin.instance.v1.InstanceConfig\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"W\n\x18GetInstanceConfigRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%spanner.googleapis.com/InstanceConfig"{\n\x12GetInstanceRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb9\x01\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x08instance\x18\x03 \x01(\x0b\x32*.google.spanner.admin.instance.v1.InstanceB\x03\xe0\x41\x02"\x92\x01\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x04 \x01(\t"o\n\x15ListInstancesResponse\x12=\n\tinstances\x18\x01 \x03(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8f\x01\n\x15UpdateInstanceRequest\x12\x41\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.InstanceB\x03\xe0\x41\x02\x12\x33\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"N\n\x15\x44\x65leteInstanceRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance"\xe5\x01\n\x16\x43reateInstanceMetadata\x12<\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xe5\x01\n\x16UpdateInstanceMetadata\x12<\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xbf\x10\n\rInstanceAdmin\x12\xcc\x01\n\x13ListInstanceConfigs\x12<.google.spanner.admin.instance.v1.ListInstanceConfigsRequest\x1a=.google.spanner.admin.instance.v1.ListInstanceConfigsResponse"8\x82\xd3\xe4\x93\x02)\x12\'/v1/{parent=projects/*}/instanceConfigs\xda\x41\x06parent\x12\xb9\x01\n\x11GetInstanceConfig\x12:.google.spanner.admin.instance.v1.GetInstanceConfigRequest\x1a\x30.google.spanner.admin.instance.v1.InstanceConfig"6\x82\xd3\xe4\x93\x02)\x12\'/v1/{name=projects/*/instanceConfigs/*}\xda\x41\x04name\x12\xb4\x01\n\rListInstances\x12\x36.google.spanner.admin.instance.v1.ListInstancesRequest\x1a\x37.google.spanner.admin.instance.v1.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v1/{parent=projects/*}/instances\xda\x41\x06parent\x12\xa1\x01\n\x0bGetInstance\x12\x34.google.spanner.admin.instance.v1.GetInstanceRequest\x1a*.google.spanner.admin.instance.v1.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v1/{name=projects/*/instances/*}\xda\x41\x04name\x12\x9c\x02\n\x0e\x43reateInstance\x12\x37.google.spanner.admin.instance.v1.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"\xb1\x01\x82\xd3\xe4\x93\x02&"!/v1/{parent=projects/*}/instances:\x01*\xda\x41\x1bparent,instance_id,instance\xca\x41\x64\n)google.spanner.admin.instance.v1.Instance\x12\x37google.spanner.admin.instance.v1.CreateInstanceMetadata\x12\x9d\x02\n\x0eUpdateInstance\x12\x37.google.spanner.admin.instance.v1.UpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"\xb2\x01\x82\xd3\xe4\x93\x02/2*/v1/{instance.name=projects/*/instances/*}:\x01*\xda\x41\x13instance,field_mask\xca\x41\x64\n)google.spanner.admin.instance.v1.Instance\x12\x37google.spanner.admin.instance.v1.UpdateInstanceMetadata\x12\x93\x01\n\x0e\x44\x65leteInstance\x12\x37.google.spanner.admin.instance.v1.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v1/{name=projects/*/instances/*}\xda\x41\x04name\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v1/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v1/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v1/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1ax\xca\x41\x16spanner.googleapis.com\xd2\x41\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.adminB\x8d\x02\n$com.google.spanner.admin.instance.v1B\x19SpannerInstanceAdminProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance\xaa\x02&Google.Cloud.Spanner.Admin.Instance.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Instance\\V1\xea\x02+Google::Cloud::Spanner::Admin::Instance::V1b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_REPLICAINFO_REPLICATYPE = _descriptor.EnumDescriptor( - name="ReplicaType", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READ_WRITE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READ_ONLY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="WITNESS", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=559, - serialized_end=638, -) -_sym_db.RegisterEnumDescriptor(_REPLICAINFO_REPLICATYPE) - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.spanner.admin.instance.v1.Instance.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1194, - serialized_end=1249, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - - -_REPLICAINFO = _descriptor.Descriptor( - name="ReplicaInfo", - full_name="google.spanner.admin.instance.v1.ReplicaInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="location", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.location", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="default_leader_location", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.default_leader_location", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_REPLICAINFO_REPLICATYPE], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=420, - serialized_end=638, -) - - -_INSTANCECONFIG = _descriptor.Descriptor( - name="InstanceConfig", - full_name="google.spanner.admin.instance.v1.InstanceConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.InstanceConfig.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.admin.instance.v1.InstanceConfig.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="replicas", - full_name="google.spanner.admin.instance.v1.InstanceConfig.replicas", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"\352A]\n%spanner.googleapis.com/InstanceConfig\0224projects/{project}/instanceConfigs/{instance_config}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=641, - serialized_end=856, -) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - 
serialized_start=1147, - serialized_end=1192, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.spanner.admin.instance.v1.Instance", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="config", - full_name="google.spanner.admin.instance.v1.Instance.config", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A'\n%spanner.googleapis.com/InstanceConfig", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.admin.instance.v1.Instance.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.spanner.admin.instance.v1.Instance.node_count", - index=3, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.spanner.admin.instance.v1.Instance.state", - index=4, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.spanner.admin.instance.v1.Instance.labels", - index=5, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="endpoint_uris", - full_name="google.spanner.admin.instance.v1.Instance.endpoint_uris", - index=6, - number=8, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_INSTANCE_LABELSENTRY], - enum_types=[_INSTANCE_STATE], - 
serialized_options=b"\352AJ\n\037spanner.googleapis.com/Instance\022'projects/{project}/instances/{instance}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=859, - serialized_end=1328, -) - - -_LISTINSTANCECONFIGSREQUEST = _descriptor.Descriptor( - name="ListInstanceConfigsRequest", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1331, - serialized_end=1467, -) - - -_LISTINSTANCECONFIGSRESPONSE = _descriptor.Descriptor( - name="ListInstanceConfigsResponse", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance_configs", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse.instance_configs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=1470, - serialized_end=1600, -) - - -_GETINSTANCECONFIGREQUEST = _descriptor.Descriptor( - name="GetInstanceConfigRequest", - full_name="google.spanner.admin.instance.v1.GetInstanceConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.GetInstanceConfigRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A'\n%spanner.googleapis.com/InstanceConfig", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1602, - serialized_end=1689, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037spanner.googleapis.com/Instance", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1691, - serialized_end=1814, -) - - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1817, - serialized_end=2002, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.filter", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2005, - serialized_end=2151, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="instances", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2153, - serialized_end=2264, -) - - -_UPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="UpdateInstanceRequest", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2267, - serialized_end=2410, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.spanner.admin.instance.v1.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037spanner.googleapis.com/Instance", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2412, - serialized_end=2490, -) - - -_CREATEINSTANCEMETADATA = 
_descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cancel_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.cancel_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2493, - serialized_end=2722, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cancel_time", - 
full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2725, - serialized_end=2954, -) - -_REPLICAINFO.fields_by_name["type"].enum_type = _REPLICAINFO_REPLICATYPE -_REPLICAINFO_REPLICATYPE.containing_type = _REPLICAINFO -_INSTANCECONFIG.fields_by_name["replicas"].message_type = _REPLICAINFO -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_LISTINSTANCECONFIGSRESPONSE.fields_by_name[ - "instance_configs" -].message_type = _INSTANCECONFIG -_GETINSTANCEREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATEINSTANCEREQUEST.fields_by_name["instance"].message_type = _INSTANCE -_LISTINSTANCESRESPONSE.fields_by_name["instances"].message_type = _INSTANCE -_UPDATEINSTANCEREQUEST.fields_by_name["instance"].message_type = _INSTANCE -_UPDATEINSTANCEREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATEINSTANCEMETADATA.fields_by_name["instance"].message_type = _INSTANCE -_CREATEINSTANCEMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "cancel_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name["instance"].message_type = _INSTANCE -_UPDATEINSTANCEMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "cancel_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["ReplicaInfo"] = _REPLICAINFO -DESCRIPTOR.message_types_by_name["InstanceConfig"] = _INSTANCECONFIG -DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE -DESCRIPTOR.message_types_by_name[ - "ListInstanceConfigsRequest" -] = _LISTINSTANCECONFIGSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListInstanceConfigsResponse" -] = _LISTINSTANCECONFIGSRESPONSE -DESCRIPTOR.message_types_by_name["GetInstanceConfigRequest"] = _GETINSTANCECONFIGREQUEST -DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST 
-DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateInstanceRequest"] = _UPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReplicaInfo = _reflection.GeneratedProtocolMessageType( - "ReplicaInfo", - (_message.Message,), - { - "DESCRIPTOR": _REPLICAINFO, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """Protocol buffer. - - Attributes: - location: - The location of the serving resources, e.g. “us-central1”. - type: - The type of replica. - default_leader_location: - If true, this location is designated as the default leader - location where leader replicas are placed. See the `region - types documentation `__ for more details. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaInfo) - }, -) -_sym_db.RegisterMessage(ReplicaInfo) - -InstanceConfig = _reflection.GeneratedProtocolMessageType( - "InstanceConfig", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCECONFIG, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """A possible configuration for a Cloud Spanner instance. Configurations - define the geographic placement of nodes and their replication. - - Attributes: - name: - A unique identifier for the instance configuration. Values are - of the form - ``projects//instanceConfigs/[a-z][-a-z0-9]*`` - display_name: - The name of this instance configuration as it appears in UIs. - replicas: - The geographic placement of nodes in this instance - configuration and their replication properties. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.InstanceConfig) - }, -) -_sym_db.RegisterMessage(InstanceConfig) - -Instance = _reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCE_LABELSENTRY, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance.LabelsEntry) - }, - ), - "DESCRIPTOR": _INSTANCE, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """An isolated set of Cloud Spanner resources on which databases can be - hosted. - - Attributes: - name: - Required. A unique identifier for the instance, which cannot - be changed after the instance is created. Values are of the - form ``projects//instances/[a-z][-a-z0-9]*[a-z0-9]``. - The final segment of the name must be between 2 and 64 - characters in length. - config: - Required. The name of the instance’s configuration. Values are - of the form - ``projects//instanceConfigs/``. See - also [InstanceConfig][google.spanner.admin.instance.v1.Instanc - eConfig] and [ListInstanceConfigs][google.spanner.admin.instan - ce.v1.InstanceAdmin.ListInstanceConfigs]. - display_name: - Required. The descriptive name for this instance as it appears - in UIs. 
Must be unique per project and between 4 and 30 - characters in length. - node_count: - Required. The number of nodes allocated to this instance. This - may be zero in API responses for instances that are not yet in - state ``READY``. See `the documentation `__ for more - information about nodes. - state: - Output only. The current instance state. For [CreateInstance][ - google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] - , the state must be either omitted or set to ``CREATING``. For - [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmi - n.UpdateInstance], the state must be either omitted or set to - ``READY``. - labels: - Cloud Labels are a flexible and lightweight mechanism for - organizing cloud resources into groups that reflect a - customer’s organizational needs and deployment strategies. - Cloud Labels can be used to filter collections of resources. - They can be used to control how resource metrics are - aggregated. And they can be used as arguments to policy - management rules (e.g. route, firewall, load balancing, etc.). - - Label keys must be between 1 and 63 characters long and - must conform to the following regular expression: - ``[a-z]([-a-z0-9]*[a-z0-9])?``. - Label values must be - between 0 and 63 characters long and must conform to the - regular expression ``([a-z]([-a-z0-9]*[a-z0-9])?)?``. - No - more than 64 labels can be associated with a given resource. - See https://goo.gl/xmQnxf for more information on and examples - of labels. If you plan to use labels in your own code, please - note that additional characters may be allowed in the future. - And so you are advised to use an internal label - representation, such as JSON, which doesn’t rely upon specific - characters being disallowed. For example, representing labels - as the string: name + “*" + value would prove problematic if - we were to allow "*” in a future release. - endpoint_uris: - Deprecated. This field is not populated. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance) - }, -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -ListInstanceConfigsRequest = _reflection.GeneratedProtocolMessageType( - "ListInstanceConfigsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCECONFIGSREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [ListInstanceConfigs][google.spanner.admin.instance.v1 - .InstanceAdmin.ListInstanceConfigs]. - - Attributes: - parent: - Required. The name of the project for which a list of - supported instance configurations is requested. Values are of - the form ``projects/``. - page_size: - Number of instance configurations to be returned in the - response. If 0 or less, defaults to the server’s maximum - allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.instance.v1.ListInstanceConfigsRespon - se.next\_page\_token] from a previous [ListInstanceConfigsResp - onse][google.spanner.admin.instance.v1.ListInstanceConfigsResp - onse]. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) - }, -) -_sym_db.RegisterMessage(ListInstanceConfigsRequest) - -ListInstanceConfigsResponse = _reflection.GeneratedProtocolMessageType( - "ListInstanceConfigsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCECONFIGSRESPONSE, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The response for [ListInstanceConfigs][google.spanner.admin.instance.v - 1.InstanceAdmin.ListInstanceConfigs]. - - Attributes: - instance_configs: - The list of requested instance configurations. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListInstanc - eConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListI - nstanceConfigs] call to fetch more of the matching instance - configurations. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) - }, -) -_sym_db.RegisterMessage(ListInstanceConfigsResponse) - -GetInstanceConfigRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceConfigRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETINSTANCECONFIGREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [GetInstanceConfigRequest][google.spanner.admin.instan - ce.v1.InstanceAdmin.GetInstanceConfig]. - - Attributes: - name: - Required. The name of the requested instance configuration. - Values are of the form - ``projects//instanceConfigs/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceConfigRequest) - }, -) -_sym_db.RegisterMessage(GetInstanceConfigRequest) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETINSTANCEREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [GetInstance][google.spanner.admin.instance.v1.Instanc - eAdmin.GetInstance]. - - Attributes: - name: - Required. The name of the requested instance. Values are of - the form ``projects//instances/``. - field_mask: - If field_mask is present, specifies the subset of - [Instance][google.spanner.admin.instance.v1.Instance] fields - that should be returned. If absent, all - [Instance][google.spanner.admin.instance.v1.Instance] fields - are returned. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceRequest) - }, -) -_sym_db.RegisterMessage(GetInstanceRequest) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [CreateInstance][google.spanner.admin.instance.v1.Inst - anceAdmin.CreateInstance]. - - Attributes: - parent: - Required. The name of the project in which to create the - instance. Values are of the form ``projects/``. - instance_id: - Required. The ID of the instance to create. Valid identifiers - are of the form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be - between 2 and 64 characters in length. - instance: - Required. The instance to create. The name may be omitted, but - if specified must be ``/instances/``. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceRequest) - }, -) -_sym_db.RegisterMessage(CreateInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [ListInstances][google.spanner.admin.instance.v1.Insta - nceAdmin.ListInstances]. - - Attributes: - parent: - Required. The name of the project for which a list of - instances is requested. Values are of the form - ``projects/``. - page_size: - Number of instances to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.instance.v1.ListInstancesResponse.nex - t\_page\_token] from a previous [ListInstancesResponse][google - .spanner.admin.instance.v1.ListInstancesResponse]. - filter: - An expression for filtering the results of the request. Filter - rules are case insensitive. The fields eligible for filtering - are: - ``name`` - ``display_name`` - ``labels.key`` where - key is the name of a label Some examples of using filters - are: - ``name:*`` –> The instance has a name. - - ``name:Howl`` –> The instance’s name contains the string - “howl”. - ``name:HOWL`` –> Equivalent to above. - - ``NAME:howl`` –> Equivalent to above. - ``labels.env:*`` –> - The instance has the label “env”. - ``labels.env:dev`` –> The - instance has the label “env” and the value of the label - contains the string “dev”. - ``name:howl labels.env:dev`` –> - The instance’s name contains “howl” and it has the label - “env” with its value containing “dev”. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesRequest) - }, -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESRESPONSE, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The response for [ListInstances][google.spanner.admin.instance.v1.Inst - anceAdmin.ListInstances]. - - Attributes: - instances: - The list of requested instances. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListInstanc - es][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanc - es] call to fetch more of the matching instances. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesResponse) - }, -) -_sym_db.RegisterMessage(ListInstancesResponse) - -UpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEINSTANCEREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [UpdateInstance][google.spanner.admin.instance.v1.Inst - anceAdmin.UpdateInstance]. - - Attributes: - instance: - Required. The instance to update, which must always include - the instance name. Otherwise, only fields mentioned in [field\ - _mask][google.spanner.admin.instance.v1.UpdateInstanceRequest. - field\_mask] need be included. - field_mask: - Required. A mask specifying which fields in - [Instance][google.spanner.admin.instance.v1.Instance] should - be updated. 
The field mask must always be specified; this - prevents any future fields in - [Instance][google.spanner.admin.instance.v1.Instance] from - being erased accidentally by clients that do not know about - them. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceRequest) - }, -) -_sym_db.RegisterMessage(UpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEINSTANCEREQUEST, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """The request for [DeleteInstance][google.spanner.admin.instance.v1.Inst - anceAdmin.DeleteInstance]. - - Attributes: - name: - Required. The name of the instance to be deleted. Values are - of the form ``projects//instances/`` - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.DeleteInstanceRequest) - }, -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEMETADATA, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """Metadata type for the operation returned by [CreateInstance][google.sp - anner.admin.instance.v1.InstanceAdmin.CreateInstance]. - - Attributes: - instance: - The instance being created. - start_time: - The time at which the [CreateInstance][google.spanner.admin.in - stance.v1.InstanceAdmin.CreateInstance] request was received. - cancel_time: - The time at which this operation was cancelled. If set, this - operation is in the process of undoing itself (which is - guaranteed to succeed) and cannot be cancelled again. - end_time: - The time at which this operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEINSTANCEMETADATA, - "__module__": "google.cloud.spanner_admin_instance_v1.proto.spanner_instance_admin_pb2", - "__doc__": """Metadata type for the operation returned by [UpdateInstance][google.sp - anner.admin.instance.v1.InstanceAdmin.UpdateInstance]. - - Attributes: - instance: - The desired end state of the update. - start_time: - The time at which [UpdateInstance][google.spanner.admin.instan - ce.v1.InstanceAdmin.UpdateInstance] request was received. - cancel_time: - The time at which this operation was cancelled. If set, this - operation is in the process of undoing itself (which is - guaranteed to succeed) and cannot be cancelled again. - end_time: - The time at which this operation failed or was completed - successfully. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - - -DESCRIPTOR._options = None -_INSTANCECONFIG._options = None -_INSTANCE_LABELSENTRY._options = None -_INSTANCE.fields_by_name["config"]._options = None -_INSTANCE._options = None -_LISTINSTANCECONFIGSREQUEST.fields_by_name["parent"]._options = None -_GETINSTANCECONFIGREQUEST.fields_by_name["name"]._options = None -_GETINSTANCEREQUEST.fields_by_name["name"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None -_UPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_UPDATEINSTANCEREQUEST.fields_by_name["field_mask"]._options = None -_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None - -_INSTANCEADMIN = _descriptor.ServiceDescriptor( - name="InstanceAdmin", - full_name="google.spanner.admin.instance.v1.InstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\026spanner.googleapis.com\322A\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.admin", - create_key=_descriptor._internal_create_key, - serialized_start=2957, - serialized_end=5068, - methods=[ - _descriptor.MethodDescriptor( - name="ListInstanceConfigs", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs", - index=0, - containing_service=None, - input_type=_LISTINSTANCECONFIGSREQUEST, - output_type=_LISTINSTANCECONFIGSRESPONSE, - serialized_options=b"\202\323\344\223\002)\022'/v1/{parent=projects/*}/instanceConfigs\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetInstanceConfig", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig", - index=1, - containing_service=None, - input_type=_GETINSTANCECONFIGREQUEST, - output_type=_INSTANCECONFIG, - serialized_options=b"\202\323\344\223\002)\022'/v1/{name=projects/*/instanceConfigs/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=b"\202\323\344\223\002#\022!/v1/{parent=projects/*}/instances\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetInstance", - index=3, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=_INSTANCE, - serialized_options=b"\202\323\344\223\002#\022!/v1/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance", - index=4, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002&"!/v1/{parent=projects/*}/instances:\001*\332A\033parent,instance_id,instance\312Ad\n)google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v1.CreateInstanceMetadata', - 
            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="UpdateInstance",
-            full_name="google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance",
-            index=5,
-            containing_service=None,
-            input_type=_UPDATEINSTANCEREQUEST,
-            output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
-            serialized_options=b"\202\323\344\223\002/2*/v1/{instance.name=projects/*/instances/*}:\001*\332A\023instance,field_mask\312Ad\n)google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v1.UpdateInstanceMetadata",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="DeleteInstance",
-            full_name="google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance",
-            index=6,
-            containing_service=None,
-            input_type=_DELETEINSTANCEREQUEST,
-            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
-            serialized_options=b"\202\323\344\223\002#*!/v1/{name=projects/*/instances/*}\332A\004name",
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="SetIamPolicy",
-            full_name="google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy",
-            index=7,
-            containing_service=None,
-            input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST,
-            output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
-            serialized_options=b'\202\323\344\223\0027"2/v1/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy',
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="GetIamPolicy",
-            full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy",
-            index=8,
-            containing_service=None,
-            input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST,
-            output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
-            serialized_options=b'\202\323\344\223\0027"2/v1/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource',
-            create_key=_descriptor._internal_create_key,
-        ),
-        _descriptor.MethodDescriptor(
-            name="TestIamPermissions",
-            full_name="google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions",
-            index=9,
-            containing_service=None,
-            input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST,
-            output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE,
-            serialized_options=b'\202\323\344\223\002="8/v1/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions',
-            create_key=_descriptor._internal_create_key,
-        ),
-    ],
-)
-_sym_db.RegisterServiceDescriptor(_INSTANCEADMIN)
-
-DESCRIPTOR.services_by_name["InstanceAdmin"] = _INSTANCEADMIN
-
-# @@protoc_insertion_point(module_scope)
diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py
deleted file mode 100644
index 29964606bd..0000000000
--- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py
+++ /dev/null
@@ -1,640 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
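The stub and servicer classes in the module being deleted here are superseded by the microgenerated client under services/instance_admin. A minimal usage sketch, assuming google-cloud-spanner >= 2.0.0 exports the client at the package level; the project name is hypothetical:

from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient

client = InstanceAdminClient()

# The returned pager transparently follows next_page_token, replacing the
# raw ListInstanceConfigs stub call and manual page handling.
for config in client.list_instance_configs(parent="projects/my-project"):
    print(config.name, config.display_name)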
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class InstanceAdminStub(object): - """Cloud Spanner Instance Admin API - - The Cloud Spanner Instance Admin API can be used to create, delete, - modify and list instances. Instances are dedicated Cloud Spanner serving - and storage resources to be used by Cloud Spanner databases. - - Each instance has a "configuration", which dictates where the - serving resources for the Cloud Spanner instance are located (e.g., - US-central, Europe). Configurations are created by Google based on - resource availability. - - Cloud Spanner billing is based on the instances that exist and their - sizes. After an instance exists, there are no additional - per-database or per-operation charges for use of the instance - (though there may be additional network bandwidth charges). - Instances offer isolation: problems with databases in one instance - will not affect other instances. However, within an instance - databases can affect each other. For example, if one database in an - instance receives a lot of requests and consumes most of the - instance resources, fewer resources are available for other - databases in that instance, and their performance may suffer. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ListInstanceConfigs = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.FromString, - ) - self.GetInstanceConfig = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.FromString, - ) - self.CreateInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", - request_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", - 
-            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
-            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
-        )
-
-
-class InstanceAdminServicer(object):
-    """Cloud Spanner Instance Admin API
-
-    The Cloud Spanner Instance Admin API can be used to create, delete,
-    modify and list instances. Instances are dedicated Cloud Spanner serving
-    and storage resources to be used by Cloud Spanner databases.
-
-    Each instance has a "configuration", which dictates where the
-    serving resources for the Cloud Spanner instance are located (e.g.,
-    US-central, Europe). Configurations are created by Google based on
-    resource availability.
-
-    Cloud Spanner billing is based on the instances that exist and their
-    sizes. After an instance exists, there are no additional
-    per-database or per-operation charges for use of the instance
-    (though there may be additional network bandwidth charges).
-    Instances offer isolation: problems with databases in one instance
-    will not affect other instances. However, within an instance
-    databases can affect each other. For example, if one database in an
-    instance receives a lot of requests and consumes most of the
-    instance resources, fewer resources are available for other
-    databases in that instance, and their performance may suffer.
-    """
-
-    def ListInstanceConfigs(self, request, context):
-        """Lists the supported instance configurations for a given project.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def GetInstanceConfig(self, request, context):
-        """Gets information about a particular instance configuration.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def ListInstances(self, request, context):
-        """Lists all instances in the given project.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def GetInstance(self, request, context):
-        """Gets information about a particular instance.
-        """
-        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details("Method not implemented!")
-        raise NotImplementedError("Method not implemented!")
-
-    def CreateInstance(self, request, context):
-        """Creates an instance and begins preparing it to begin serving. The
-        returned [long-running operation][google.longrunning.Operation]
-        can be used to track the progress of preparing the new
-        instance. The instance name is assigned by the caller. If the
-        named instance already exists, `CreateInstance` returns
-        `ALREADY_EXISTS`.
-
-        Immediately upon completion of this request:
-
-        * The instance is readable via the API, with all requested attributes
-        but no allocated resources. Its state is `CREATING`.
-
-        Until completion of the returned operation:
-
-        * Cancelling the operation renders the instance immediately unreadable
-        via the API.
-        * The instance can be deleted.
-        * All other attempts to modify the instance are rejected.
-
-        Upon completion of the returned operation:
-
-        * Billing for all successfully-allocated resources begins (some types
-        may have lower than the requested levels).
-        * Databases can be created in the instance.
-        * The instance's allocated resource levels are readable via the API.
- * The instance's state becomes `READY`. - - The returned [long-running operation][google.longrunning.Operation] will - have a name of the format `/operations/` and - can be used to track creation of the instance. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type is - [Instance][google.spanner.admin.instance.v1.Instance], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance, and begins allocating or releasing resources - as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track the - progress of updating the instance. If the named instance does not - exist, returns `NOT_FOUND`. - - Immediately upon completion of this request: - - * For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - * Cancelling the operation sets its metadata's - [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, - after which point it terminates with a `CANCELLED` status. - * All other attempts to modify the instance are rejected. - * Reading the instance via the API continues to give the pre-request - resource levels. - - Upon completion of the returned operation: - - * Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - * All newly-reserved resources are available for serving the instance's - tables. - * The instance's new resource levels are readable via the API. - - The returned [long-running operation][google.longrunning.Operation] will - have a name of the format `/operations/` and - can be used to track the instance modification. The - [metadata][google.longrunning.Operation.metadata] field type is - [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type is - [Instance][google.spanner.admin.instance.v1.Instance], if successful. - - Authorization requires `spanner.instances.update` permission on - resource [name][google.spanner.admin.instance.v1.Instance.name]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Deletes an instance. - - Immediately upon completion of the request: - - * Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - * The instance and *all of its databases* immediately and - irrevocably disappear from the API. All data in the databases - is permanently deleted. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. 
- - Authorization requires `spanner.instances.setIamPolicy` on - [resource][google.iam.v1.SetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Authorization requires `spanner.instances.getIamPolicy` on - [resource][google.iam.v1.GetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource will - result in a NOT_FOUND error if the user has `spanner.instances.list` - permission on the containing Google Cloud Project. Otherwise returns an - empty set of permissions. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_InstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListInstanceConfigs": grpc.unary_unary_rpc_method_handler( - servicer.ListInstanceConfigs, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.SerializeToString, - ), - "GetInstanceConfig": grpc.unary_unary_rpc_method_handler( - servicer.GetInstanceConfig, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.SerializeToString, - ), - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.FromString, - 
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.admin.instance.v1.InstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class InstanceAdmin(object): - """Cloud Spanner Instance Admin API - - The Cloud Spanner Instance Admin API can be used to create, delete, - modify and list instances. Instances are dedicated Cloud Spanner serving - and storage resources to be used by Cloud Spanner databases. - - Each instance has a "configuration", which dictates where the - serving resources for the Cloud Spanner instance are located (e.g., - US-central, Europe). Configurations are created by Google based on - resource availability. - - Cloud Spanner billing is based on the instances that exist and their - sizes. After an instance exists, there are no additional - per-database or per-operation charges for use of the instance - (though there may be additional network bandwidth charges). - Instances offer isolation: problems with databases in one instance - will not affect other instances. However, within an instance - databases can affect each other. For example, if one database in an - instance receives a lot of requests and consumes most of the - instance resources, fewer resources are available for other - databases in that instance, and their performance may suffer. 
- """ - - @staticmethod - def ListInstanceConfigs( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetInstanceConfig( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListInstances( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.SerializeToString, - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod 
- def UpdateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", - google_dot_cloud_dot_spanner__admin__instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/spanner_admin_instance_v1/py.typed b/google/cloud/spanner_admin_instance_v1/py.typed new file mode 100644 index 0000000000..915a8e55e3 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-spanner-admin-instance package uses inline types. 
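The py.typed marker above is what makes the package's inline annotations visible to PEP 561-aware type checkers. A minimal sketch of what that enables (hypothetical file name; assumes the package re-exports InstanceAdminClient at the top level):

    # check.py -- with py.typed shipped, mypy reads the inline annotations
    # instead of treating the package as untyped.
    from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient

    client = InstanceAdminClient()
    reveal_type(client.transport)  # mypy-only construct; reports the transport type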
diff --git a/google/cloud/spanner_admin_instance_v1/services/__init__.py b/google/cloud/spanner_admin_instance_v1/services/__init__.py new file mode 100644 index 0000000000..42ffdf2bc4 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py new file mode 100644 index 0000000000..88c7894332 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import InstanceAdminClient +from .async_client import InstanceAdminAsyncClient + +__all__ = ( + "InstanceAdminClient", + "InstanceAdminAsyncClient", +) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py new file mode 100644 index 0000000000..fd4cd3d18d --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -0,0 +1,1282 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.spanner_admin_instance_v1.services.instance_admin import pagers +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore + +from .transports.base import InstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import InstanceAdminGrpcAsyncIOTransport +from .client import InstanceAdminClient + + +class InstanceAdminAsyncClient: + """Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, + delete, modify and list instances. Instances are dedicated Cloud + Spanner serving and storage resources to be used by Cloud + Spanner databases. + Each instance has a "configuration", which dictates where the + serving resources for the Cloud Spanner instance are located + (e.g., US-central, Europe). Configurations are created by Google + based on resource availability. + + Cloud Spanner billing is based on the instances that exist and + their sizes. After an instance exists, there are no additional + per-database or per-operation charges for use of the instance + (though there may be additional network bandwidth charges). + Instances offer isolation: problems with databases in one + instance will not affect other instances. However, within an + instance databases can affect each other. For example, if one + database in an instance receives a lot of requests and consumes + most of the instance resources, fewer resources are available + for other databases in that instance, and their performance may + suffer. 
+ """ + + _client: InstanceAdminClient + + DEFAULT_ENDPOINT = InstanceAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = InstanceAdminClient.DEFAULT_MTLS_ENDPOINT + + instance_path = staticmethod(InstanceAdminClient.instance_path) + parse_instance_path = staticmethod(InstanceAdminClient.parse_instance_path) + instance_config_path = staticmethod(InstanceAdminClient.instance_config_path) + parse_instance_config_path = staticmethod( + InstanceAdminClient.parse_instance_config_path + ) + + common_billing_account_path = staticmethod( + InstanceAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + InstanceAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(InstanceAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + InstanceAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + InstanceAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + InstanceAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(InstanceAdminClient.common_project_path) + parse_common_project_path = staticmethod( + InstanceAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod(InstanceAdminClient.common_location_path) + parse_common_location_path = staticmethod( + InstanceAdminClient.parse_common_location_path + ) + + from_service_account_file = InstanceAdminClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InstanceAdminTransport: + """Return the transport used by the client instance. + + Returns: + InstanceAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(InstanceAdminClient).get_transport_class, type(InstanceAdminClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, InstanceAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.InstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+
+        self._client = InstanceAdminClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def list_instance_configs(
+        self,
+        request: spanner_instance_admin.ListInstanceConfigsRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListInstanceConfigsAsyncPager:
+        r"""Lists the supported instance configurations for a
+        given project.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.ListInstanceConfigsRequest`):
+                The request object. The request for
+                [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+            parent (:class:`str`):
+                Required. The name of the project for which a list of
+                supported instance configurations is requested. Values
+                are of the form ``projects/<project>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListInstanceConfigsAsyncPager:
+                The response for
+                [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.ListInstanceConfigsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_instance_configs,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListInstanceConfigsAsyncPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
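As a usage sketch (illustrative only, not part of this patch): the pager returned here supports `async for`, so listing configurations might look like the following, assuming default application credentials and a hypothetical project ID.

    import asyncio

    from google.cloud.spanner_admin_instance_v1.services.instance_admin import (
        InstanceAdminAsyncClient,
    )

    async def main():
        client = InstanceAdminAsyncClient()  # default credentials and transport
        pager = await client.list_instance_configs(parent="projects/my-project")
        async for config in pager:  # additional pages are fetched transparently
            print(config.name)

    asyncio.run(main())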
+        return response
+
+    async def get_instance_config(
+        self,
+        request: spanner_instance_admin.GetInstanceConfigRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> spanner_instance_admin.InstanceConfig:
+        r"""Gets information about a particular instance
+        configuration.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.GetInstanceConfigRequest`):
+                The request object. The request for
+                [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
+            name (:class:`str`):
+                Required. The name of the requested instance
+                configuration. Values are of the form
+                ``projects/<project>/instanceConfigs/<config>``.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.spanner_instance_admin.InstanceConfig:
+                A possible configuration for a Cloud
+                Spanner instance. Configurations define
+                the geographic placement of nodes and
+                their replication.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.GetInstanceConfigRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_instance_config,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def list_instances(
+        self,
+        request: spanner_instance_admin.ListInstancesRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListInstancesAsyncPager:
+        r"""Lists all instances in the given project.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.ListInstancesRequest`):
+                The request object. The request for
+                [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+            parent (:class:`str`):
+                Required. The name of the project for which a list of
+                instances is requested. Values are of the form
+                ``projects/<project>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListInstancesAsyncPager:
+                The response for
+                [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.ListInstancesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_instances,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListInstancesAsyncPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_instance(
+        self,
+        request: spanner_instance_admin.GetInstanceRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> spanner_instance_admin.Instance:
+        r"""Gets information about a particular instance.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.GetInstanceRequest`):
+                The request object. The request for
+                [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+            name (:class:`str`):
+                Required. The name of the requested instance. Values are
+                of the form ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.spanner_instance_admin.Instance:
+                An isolated set of Cloud Spanner
+                resources on which databases can be
+                hosted.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.GetInstanceRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_instance,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def create_instance(
+        self,
+        request: spanner_instance_admin.CreateInstanceRequest = None,
+        *,
+        parent: str = None,
+        instance_id: str = None,
+        instance: spanner_instance_admin.Instance = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Creates an instance and begins preparing it to begin serving.
+        The returned [long-running
+        operation][google.longrunning.Operation] can be used to track
+        the progress of preparing the new instance. The instance name is
+        assigned by the caller. If the named instance already exists,
+        ``CreateInstance`` returns ``ALREADY_EXISTS``.
+
+        Immediately upon completion of this request:
+
+        -  The instance is readable via the API, with all requested
+           attributes but no allocated resources. Its state is
+           ``CREATING``.
+
+        Until completion of the returned operation:
+
+        -  Cancelling the operation renders the instance immediately
+           unreadable via the API.
+        -  The instance can be deleted.
+        -  All other attempts to modify the instance are rejected.
+
+        Upon completion of the returned operation:
+
+        -  Billing for all successfully-allocated resources begins (some
+           types may have lower than the requested levels).
+        -  Databases can be created in the instance.
+        -  The instance's allocated resource levels are readable via the
+           API.
+        -  The instance's state becomes ``READY``.
+
+        The returned [long-running
+        operation][google.longrunning.Operation] will have a name of the
+        format ``<instance_name>/operations/<operation_id>`` and can be
+        used to track creation of the instance. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+        The [response][google.longrunning.Operation.response] field type
+        is [Instance][google.spanner.admin.instance.v1.Instance], if
+        successful.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.CreateInstanceRequest`):
+                The request object. The request for
+                [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+            parent (:class:`str`):
+                Required. The name of the project in which to create the
+                instance. Values are of the form ``projects/<project>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance_id (:class:`str`):
+                Required. The ID of the instance to create. Valid
+                identifiers are of the form ``[a-z][-a-z0-9]*[a-z0-9]``
+                and must be between 2 and 64 characters in length.
+                This corresponds to the ``instance_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance (:class:`~.spanner_instance_admin.Instance`):
+                Required. The instance to create. The name may be
+                omitted, but if specified must be
+                ``<parent>/instances/<instance_id>``.
+                This corresponds to the ``instance`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:``~.spanner_instance_admin.Instance``: An
+                isolated set of Cloud Spanner resources on which
+                databases can be hosted.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, instance_id, instance])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.CreateInstanceRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if instance_id is not None:
+            request.instance_id = instance_id
+        if instance is not None:
+            request.instance = instance
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.create_instance,
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            spanner_instance_admin.Instance,
+            metadata_type=spanner_instance_admin.CreateInstanceMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def update_instance(
+        self,
+        request: spanner_instance_admin.UpdateInstanceRequest = None,
+        *,
+        instance: spanner_instance_admin.Instance = None,
+        field_mask: gp_field_mask.FieldMask = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Updates an instance, and begins allocating or releasing
+        resources as requested. The returned [long-running
+        operation][google.longrunning.Operation] can be used to track
+        the progress of updating the instance. If the named instance
+        does not exist, returns ``NOT_FOUND``.
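For the long-running pattern documented on ``create_instance``, a hedged usage sketch: the coroutine resolves to an ``AsyncOperation`` whose awaited ``result()`` yields the ``Instance`` once provisioning reaches ``READY`` (project, instance ID, and config below are hypothetical).

    from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin

    async def create(client):
        operation = await client.create_instance(
            parent="projects/my-project",
            instance_id="my-instance",
            instance=spanner_instance_admin.Instance(
                config="projects/my-project/instanceConfigs/regional-us-central1",
                display_name="My Instance",
                node_count=1,
            ),
        )
        # Block until the CreateInstance LRO completes (state becomes READY).
        return await operation.result()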
+
+        Immediately upon completion of this request:
+
+        -  For resource types for which a decrease in the instance's
+           allocation has been requested, billing is based on the
+           newly-requested level.
+
+        Until completion of the returned operation:
+
+        -  Cancelling the operation sets its metadata's
+           [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
+           and begins restoring resources to their pre-request values.
+           The operation is guaranteed to succeed at undoing all
+           resource changes, after which point it terminates with a
+           ``CANCELLED`` status.
+        -  All other attempts to modify the instance are rejected.
+        -  Reading the instance via the API continues to give the
+           pre-request resource levels.
+
+        Upon completion of the returned operation:
+
+        -  Billing begins for all successfully-allocated resources (some
+           types may have lower than the requested levels).
+        -  All newly-reserved resources are available for serving the
+           instance's tables.
+        -  The instance's new resource levels are readable via the API.
+
+        The returned [long-running
+        operation][google.longrunning.Operation] will have a name of the
+        format ``<instance_name>/operations/<operation_id>`` and can be
+        used to track the instance modification. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+        The [response][google.longrunning.Operation.response] field type
+        is [Instance][google.spanner.admin.instance.v1.Instance], if
+        successful.
+
+        Authorization requires ``spanner.instances.update`` permission
+        on resource
+        [name][google.spanner.admin.instance.v1.Instance.name].
+
+        Args:
+            request (:class:`~.spanner_instance_admin.UpdateInstanceRequest`):
+                The request object. The request for
+                [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+            instance (:class:`~.spanner_instance_admin.Instance`):
+                Required. The instance to update, which must always
+                include the instance name. Otherwise, only fields
+                mentioned in
+                [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
+                need be included.
+                This corresponds to the ``instance`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            field_mask (:class:`~.gp_field_mask.FieldMask`):
+                Required. A mask specifying which fields in
+                [Instance][google.spanner.admin.instance.v1.Instance]
+                should be updated. The field mask must always be
+                specified; this prevents any future fields in
+                [Instance][google.spanner.admin.instance.v1.Instance]
+                from being erased accidentally by clients that do not
+                know about them.
+                This corresponds to the ``field_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:``~.spanner_instance_admin.Instance``: An
+                isolated set of Cloud Spanner resources on which
+                databases can be hosted.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([instance, field_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.UpdateInstanceRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if instance is not None:
+            request.instance = instance
+        if field_mask is not None:
+            request.field_mask = field_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_instance,
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("instance.name", request.instance.name),)
+            ),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            spanner_instance_admin.Instance,
+            metadata_type=spanner_instance_admin.UpdateInstanceMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_instance(
+        self,
+        request: spanner_instance_admin.DeleteInstanceRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Deletes an instance.
+
+        Immediately upon completion of the request:
+
+        -  Billing ceases for all of the instance's reserved resources.
+
+        Soon afterward:
+
+        -  The instance and *all of its databases* immediately and
+           irrevocably disappear from the API. All data in the databases
+           is permanently deleted.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.DeleteInstanceRequest`):
+                The request object. The request for
+                [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+            name (:class:`str`):
+                Required. The name of the instance to be deleted. Values
+                are of the form
+                ``projects/<project>/instances/<instance>``
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = spanner_instance_admin.DeleteInstanceRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
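One way to exercise the field-mask contract documented on ``update_instance`` (a sketch; names are hypothetical): only the paths listed in ``field_mask`` are applied, which is what protects fields the client does not know about from being erased.

    from google.protobuf import field_mask_pb2

    async def rename(client, instance):
        instance.display_name = "Renamed instance"
        operation = await client.update_instance(
            instance=instance,
            field_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        )
        return await operation.result()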
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_instance,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=3600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    async def set_iam_policy(
+        self,
+        request: iam_policy.SetIamPolicyRequest = None,
+        *,
+        resource: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy.Policy:
+        r"""Sets the access control policy on an instance resource. Replaces
+        any existing policy.
+
+        Authorization requires ``spanner.instances.setIamPolicy`` on
+        [resource][google.iam.v1.SetIamPolicyRequest.resource].
+
+        Args:
+            request (:class:`~.iam_policy.SetIamPolicyRequest`):
+                The request object. Request message for `SetIamPolicy`
+                method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being specified. See the
+                operation documentation for the
+                appropriate value for this field.
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.policy.Policy:
+                Defines an Identity and Access Management (IAM) policy.
+                It is used to specify access control policies for Cloud
+                Platform resources.
+
+                A ``Policy`` is a collection of ``bindings``. A
+                ``binding`` binds one or more ``members`` to a single
+                ``role``. Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions (defined by
+                IAM or configured by users). A ``binding`` can
+                optionally specify a ``condition``, which is a logic
+                expression that further constrains the role binding
+                based on attributes about the request and/or target
+                resource.
+
+                **JSON Example**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.SetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.SetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=30.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def get_iam_policy(
+        self,
+        request: iam_policy.GetIamPolicyRequest = None,
+        *,
+        resource: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy.Policy:
+        r"""Gets the access control policy for an instance resource. Returns
+        an empty policy if an instance exists but does not have a policy
+        set.
+
+        Authorization requires ``spanner.instances.getIamPolicy`` on
+        [resource][google.iam.v1.GetIamPolicyRequest.resource].
+
+        Args:
+            request (:class:`~.iam_policy.GetIamPolicyRequest`):
+                The request object. Request message for `GetIamPolicy`
+                method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being requested. See the
+                operation documentation for the
+                appropriate value for this field.
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.policy.Policy:
+                Defines an Identity and Access Management (IAM) policy.
+                It is used to specify access control policies for Cloud
+                Platform resources.
+
+                A ``Policy`` is a collection of ``bindings``. A
+                ``binding`` binds one or more ``members`` to a single
+                ``role``. Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions (defined by
+                IAM or configured by users). A ``binding`` can
+                optionally specify a ``condition``, which is a logic
+                expression that further constrains the role binding
+                based on attributes about the request and/or target
+                resource.
+
+                **JSON Example**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.GetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.GetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=32.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=30.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + async def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the specified + instance resource. + + Attempting this RPC on a non-existent Cloud Spanner instance + resource will result in a NOT_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google + Cloud Project. Otherwise returns an empty set of permissions. + + Args: + request (:class:`~.iam_policy.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
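+        # Illustrative call site (hypothetical; the resource name and
+        # permission are placeholders):
+        #
+        #     client = InstanceAdminAsyncClient()
+        #     resp = await client.test_iam_permissions(
+        #         resource="projects/my-project/instances/my-instance",
+        #         permissions=["spanner.instances.get"],
+        #     )
+        #     granted = list(resp.permissions)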
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-spanner-admin-instance", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("InstanceAdminAsyncClient",) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py new file mode 100644 index 0000000000..c82a2065bc --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -0,0 +1,1427 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.spanner_admin_instance_v1.services.instance_admin import pagers +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore + +from .transports.base import InstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import InstanceAdminGrpcTransport +from .transports.grpc_asyncio import InstanceAdminGrpcAsyncIOTransport + + +class InstanceAdminClientMeta(type): + """Metaclass for the InstanceAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[InstanceAdminTransport]] + _transport_registry["grpc"] = InstanceAdminGrpcTransport + _transport_registry["grpc_asyncio"] = InstanceAdminGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[InstanceAdminTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
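+        # For example, ``get_transport_class()`` with no label yields the
+        # synchronous gRPC transport (the first entry registered above),
+        # while ``get_transport_class("grpc_asyncio")`` yields the asyncio
+        # variant.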
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class InstanceAdminClient(metaclass=InstanceAdminClientMeta):
+    """Cloud Spanner Instance Admin API
+    The Cloud Spanner Instance Admin API can be used to create,
+    delete, modify and list instances. Instances are dedicated Cloud
+    Spanner serving and storage resources to be used by Cloud
+    Spanner databases.
+    Each instance has a "configuration", which dictates where the
+    serving resources for the Cloud Spanner instance are located
+    (e.g., US-central, Europe). Configurations are created by Google
+    based on resource availability.
+
+    Cloud Spanner billing is based on the instances that exist and
+    their sizes. After an instance exists, there are no additional
+    per-database or per-operation charges for use of the instance
+    (though there may be additional network bandwidth charges).
+    Instances offer isolation: problems with databases in one
+    instance will not affect other instances. However, within an
+    instance databases can affect each other. For example, if one
+    database in an instance receives a lot of requests and consumes
+    most of the instance resources, fewer resources are available
+    for other databases in that instance, and their performance may
+    suffer.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "spanner.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            InstanceAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> InstanceAdminTransport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            InstanceAdminTransport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def instance_path(project: str, instance: str,) -> str:
+        """Return a fully-qualified instance string."""
+        return "projects/{project}/instances/{instance}".format(
+            project=project, instance=instance,
+        )
+
+    @staticmethod
+    def parse_instance_path(path: str) -> Dict[str, str]:
+        """Parse an instance path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def instance_config_path(project: str, instance_config: str,) -> str:
+        """Return a fully-qualified instance_config string."""
+        return "projects/{project}/instanceConfigs/{instance_config}".format(
+            project=project, instance_config=instance_config,
+        )
+
+    @staticmethod
+    def parse_instance_config_path(path: str) -> Dict[str, str]:
+        """Parse an instance_config path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instanceConfigs/(?P<instance_config>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, InstanceAdminTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the instance admin client.
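+
+        If no arguments are given, credentials are ascertained from the
+        environment and a transport is chosen automatically, so a bare
+        ``InstanceAdminClient()`` is typically sufficient.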
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.InstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InstanceAdminTransport): + # transport is a InstanceAdminTransport instance. 
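+            # In this branch the endpoint and mTLS settings computed above
+            # are not applied; the supplied transport already owns its
+            # channel. An illustrative (hypothetical) call site:
+            #
+            #     transport = InstanceAdminGrpcTransport(host="spanner.googleapis.com")
+            #     client = InstanceAdminClient(transport=transport)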
+ if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_instance_configs( + self, + request: spanner_instance_admin.ListInstanceConfigsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListInstanceConfigsPager: + r"""Lists the supported instance configurations for a + given project. + + Args: + request (:class:`~.spanner_instance_admin.ListInstanceConfigsRequest`): + The request object. The request for + [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + parent (:class:`str`): + Required. The name of the project for which a list of + supported instance configurations is requested. Values + are of the form ``projects/``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListInstanceConfigsPager: + The response for + [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_instance_admin.ListInstanceConfigsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_instance_admin.ListInstanceConfigsRequest): + request = spanner_instance_admin.ListInstanceConfigsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instance_configs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
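+        # The pager returned below resolves further pages lazily; an
+        # illustrative (hypothetical) call site:
+        #
+        #     for config in client.list_instance_configs(
+        #         parent="projects/my-project"
+        #     ):
+        #         print(config.name)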
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListInstanceConfigsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_instance_config( + self, + request: spanner_instance_admin.GetInstanceConfigRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner_instance_admin.InstanceConfig: + r"""Gets information about a particular instance + configuration. + + Args: + request (:class:`~.spanner_instance_admin.GetInstanceConfigRequest`): + The request object. The request for + [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. + name (:class:`str`): + Required. The name of the requested instance + configuration. Values are of the form + ``projects//instanceConfigs/``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner_instance_admin.InstanceConfig: + A possible configuration for a Cloud + Spanner instance. Configurations define + the geographic placement of nodes and + their replication. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_instance_admin.GetInstanceConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_instance_admin.GetInstanceConfigRequest): + request = spanner_instance_admin.GetInstanceConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_instance_config] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_instances( + self, + request: spanner_instance_admin.ListInstancesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListInstancesPager: + r"""Lists all instances in the given project. + + Args: + request (:class:`~.spanner_instance_admin.ListInstancesRequest`): + The request object. 
The request for
+                [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+            parent (:class:`str`):
+                Required. The name of the project for which a list of
+                instances is requested. Values are of the form
+                ``projects/<project>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListInstancesPager:
+                The response for
+                [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a spanner_instance_admin.ListInstancesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, spanner_instance_admin.ListInstancesRequest):
+            request = spanner_instance_admin.ListInstancesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_instances]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListInstancesPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_instance(
+        self,
+        request: spanner_instance_admin.GetInstanceRequest = None,
+        *,
+        name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> spanner_instance_admin.Instance:
+        r"""Gets information about a particular instance.
+
+        Args:
+            request (:class:`~.spanner_instance_admin.GetInstanceRequest`):
+                The request object. The request for
+                [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+            name (:class:`str`):
+                Required. The name of the requested instance. Values are
+                of the form ``projects/<project>/instances/<instance>``.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
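+
+        Example:
+            A minimal sketch (the resource name is a placeholder)::
+
+                instance = client.get_instance(
+                    name="projects/my-project/instances/my-instance"
+                )
+                print(instance.display_name)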
+ + Returns: + ~.spanner_instance_admin.Instance: + An isolated set of Cloud Spanner + resources on which databases can be + hosted. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_instance_admin.GetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_instance_admin.GetInstanceRequest): + request = spanner_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_instance( + self, + request: spanner_instance_admin.CreateInstanceRequest = None, + *, + parent: str = None, + instance_id: str = None, + instance: spanner_instance_admin.Instance = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates an instance and begins preparing it to begin serving. + The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of preparing the new instance. The instance name is + assigned by the caller. If the named instance already exists, + ``CreateInstance`` returns ``ALREADY_EXISTS``. + + Immediately upon completion of this request: + + - The instance is readable via the API, with all requested + attributes but no allocated resources. Its state is + ``CREATING``. + + Until completion of the returned operation: + + - Cancelling the operation renders the instance immediately + unreadable via the API. + - The instance can be deleted. + - All other attempts to modify the instance are rejected. + + Upon completion of the returned operation: + + - Billing for all successfully-allocated resources begins (some + types may have lower than the requested levels). + - Databases can be created in the instance. + - The instance's allocated resource levels are readable via the + API. + - The instance's state becomes ``READY``. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track creation of the instance. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Args: + request (:class:`~.spanner_instance_admin.CreateInstanceRequest`): + The request object. 
The request for
+                [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+            parent (:class:`str`):
+                Required. The name of the project in which to create the
+                instance. Values are of the form ``projects/<project>``.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance_id (:class:`str`):
+                Required. The ID of the instance to create. Valid
+                identifiers are of the form ``[a-z][-a-z0-9]*[a-z0-9]``
+                and must be between 2 and 64 characters in length.
+                This corresponds to the ``instance_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance (:class:`~.spanner_instance_admin.Instance`):
+                Required. The instance to create. The name may be
+                omitted, but if specified must be
+                ``<parent>/instances/<instance_id>``.
+                This corresponds to the ``instance`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.spanner_instance_admin.Instance`: An
+                isolated set of Cloud Spanner resources on which
+                databases can be hosted.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, instance_id, instance])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a spanner_instance_admin.CreateInstanceRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, spanner_instance_admin.CreateInstanceRequest):
+            request = spanner_instance_admin.CreateInstanceRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if instance_id is not None:
+            request.instance_id = instance_id
+        if instance is not None:
+            request.instance = instance
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_instance]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            spanner_instance_admin.Instance,
+            metadata_type=spanner_instance_admin.CreateInstanceMetadata,
+        )
+
+        # Done; return the response.
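+        # The returned future tracks the server-side operation. A blocking
+        # call site might look like this (all names are placeholders):
+        #
+        #     op = client.create_instance(
+        #         parent="projects/my-project",
+        #         instance_id="my-instance",
+        #         instance=spanner_instance_admin.Instance(
+        #             config="projects/my-project/instanceConfigs/regional-us-central1",
+        #             display_name="My Instance",
+        #             node_count=1,
+        #         ),
+        #     )
+        #     instance = op.result()  # blocks until CREATING -> READY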
+ return response + + def update_instance( + self, + request: spanner_instance_admin.UpdateInstanceRequest = None, + *, + instance: spanner_instance_admin.Instance = None, + field_mask: gp_field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates an instance, and begins allocating or releasing + resources as requested. The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of updating the instance. If the named instance + does not exist, returns ``NOT_FOUND``. + + Immediately upon completion of this request: + + - For resource types for which a decrease in the instance's + allocation has been requested, billing is based on the + newly-requested level. + + Until completion of the returned operation: + + - Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], + and begins restoring resources to their pre-request values. + The operation is guaranteed to succeed at undoing all + resource changes, after which point it terminates with a + ``CANCELLED`` status. + - All other attempts to modify the instance are rejected. + - Reading the instance via the API continues to give the + pre-request resource levels. + + Upon completion of the returned operation: + + - Billing begins for all successfully-allocated resources (some + types may have lower than the requested levels). + - All newly-reserved resources are available for serving the + instance's tables. + - The instance's new resource levels are readable via the API. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track the instance modification. The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Authorization requires ``spanner.instances.update`` permission + on resource + [name][google.spanner.admin.instance.v1.Instance.name]. + + Args: + request (:class:`~.spanner_instance_admin.UpdateInstanceRequest`): + The request object. The request for + [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. + instance (:class:`~.spanner_instance_admin.Instance`): + Required. The instance to update, which must always + include the instance name. Otherwise, only fields + mentioned in + [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] + need be included. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + field_mask (:class:`~.gp_field_mask.FieldMask`): + Required. A mask specifying which fields in + [Instance][google.spanner.admin.instance.v1.Instance] + should be updated. The field mask must always be + specified; this prevents any future fields in + [Instance][google.spanner.admin.instance.v1.Instance] + from being erased accidentally by clients that do not + know about them. + This corresponds to the ``field_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.spanner_instance_admin.Instance``: An + isolated set of Cloud Spanner resources on which + databases can be hosted. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, field_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_instance_admin.UpdateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_instance_admin.UpdateInstanceRequest): + request = spanner_instance_admin.UpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if instance is not None: + request.instance = instance + if field_mask is not None: + request.field_mask = field_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance.name", request.instance.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + spanner_instance_admin.Instance, + metadata_type=spanner_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + def delete_instance( + self, + request: spanner_instance_admin.DeleteInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an instance. + + Immediately upon completion of the request: + + - Billing ceases for all of the instance's reserved resources. + + Soon afterward: + + - The instance and *all of its databases* immediately and + irrevocably disappear from the API. All data in the databases + is permanently deleted. + + Args: + request (:class:`~.spanner_instance_admin.DeleteInstanceRequest`): + The request object. The request for + [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. + name (:class:`str`): + Required. The name of the instance to be deleted. Values + are of the form + ``projects//instances/`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
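+        # Note: unlike create_instance and update_instance, deletion is not
+        # exposed as a long-running operation; as documented above it takes
+        # effect immediately and is irrevocable.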
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner_instance_admin.DeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner_instance_admin.DeleteInstanceRequest): + request = spanner_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on an instance resource. Replaces + any existing policy. + + Authorization requires ``spanner.instances.setIamPolicy`` on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. + + Args: + request (:class:`~.iam_policy.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for an instance resource. Returns + an empty policy if an instance exists but does not have a policy + set. + + Authorization requires ``spanner.instances.getIamPolicy`` on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Args: + request (:class:`~.iam_policy.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
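+
+        Example:
+            A minimal sketch (the resource name is a placeholder)::
+
+                policy = client.get_iam_policy(
+                    resource="projects/my-project/instances/my-instance"
+                )
+                for binding in policy.bindings:
+                    print(binding.role, list(binding.members))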
+ + Returns: + ~.policy.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the specified + instance resource. 
+ + Attempting this RPC on a non-existent Cloud Spanner instance + resource will result in a NOT_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google + Cloud Project. Otherwise returns an empty set of permissions. + + Args: + request (:class:`~.iam_policy.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-spanner-admin-instance", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("InstanceAdminClient",) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py new file mode 100644 index 0000000000..0cb1ea3643 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin + + +class ListInstanceConfigsPager: + """A pager for iterating through ``list_instance_configs`` requests. + + This class thinly wraps an initial + :class:`~.spanner_instance_admin.ListInstanceConfigsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``instance_configs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListInstanceConfigs`` requests and continue to iterate + through the ``instance_configs`` field on the + corresponding responses. + + All the usual :class:`~.spanner_instance_admin.ListInstanceConfigsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., spanner_instance_admin.ListInstanceConfigsResponse], + request: spanner_instance_admin.ListInstanceConfigsRequest, + response: spanner_instance_admin.ListInstanceConfigsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_instance_admin.ListInstanceConfigsRequest`): + The initial request object. + response (:class:`~.spanner_instance_admin.ListInstanceConfigsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner_instance_admin.ListInstanceConfigsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[spanner_instance_admin.ListInstanceConfigsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[spanner_instance_admin.InstanceConfig]: + for page in self.pages: + yield from page.instance_configs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListInstanceConfigsAsyncPager: + """A pager for iterating through ``list_instance_configs`` requests. + + This class thinly wraps an initial + :class:`~.spanner_instance_admin.ListInstanceConfigsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``instance_configs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListInstanceConfigs`` requests and continue to iterate + through the ``instance_configs`` field on the + corresponding responses. + + All the usual :class:`~.spanner_instance_admin.ListInstanceConfigsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[spanner_instance_admin.ListInstanceConfigsResponse] + ], + request: spanner_instance_admin.ListInstanceConfigsRequest, + response: spanner_instance_admin.ListInstanceConfigsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_instance_admin.ListInstanceConfigsRequest`): + The initial request object. + response (:class:`~.spanner_instance_admin.ListInstanceConfigsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner_instance_admin.ListInstanceConfigsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[spanner_instance_admin.ListInstanceConfigsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[spanner_instance_admin.InstanceConfig]: + async def async_generator(): + async for page in self.pages: + for response in page.instance_configs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListInstancesPager: + """A pager for iterating through ``list_instances`` requests. + + This class thinly wraps an initial + :class:`~.spanner_instance_admin.ListInstancesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``instances`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListInstances`` requests and continue to iterate + through the ``instances`` field on the + corresponding responses. + + All the usual :class:`~.spanner_instance_admin.ListInstancesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., spanner_instance_admin.ListInstancesResponse], + request: spanner_instance_admin.ListInstancesRequest, + response: spanner_instance_admin.ListInstancesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_instance_admin.ListInstancesRequest`): + The initial request object. + response (:class:`~.spanner_instance_admin.ListInstancesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
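A sketch of how the async pager above is consumed, assuming hypothetical resource names, Application Default Credentials, and a running event loop; ``__aiter__`` hides the page boundaries entirely.

# Illustrative sketch (hypothetical names): items stream across pages
# without any explicit page handling by the caller.
import asyncio

from google.cloud.spanner_admin_instance_v1.services.instance_admin import (
    InstanceAdminAsyncClient,
)


async def show_instance_configs():
    client = InstanceAdminAsyncClient()
    pager = await client.list_instance_configs(parent="projects/my-project")
    async for config in pager:  # a ListInstanceConfigsAsyncPager
        print(config.name)


asyncio.run(show_instance_configs())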
+ """ + self._method = method + self._request = spanner_instance_admin.ListInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[spanner_instance_admin.ListInstancesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[spanner_instance_admin.Instance]: + for page in self.pages: + yield from page.instances + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListInstancesAsyncPager: + """A pager for iterating through ``list_instances`` requests. + + This class thinly wraps an initial + :class:`~.spanner_instance_admin.ListInstancesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``instances`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListInstances`` requests and continue to iterate + through the ``instances`` field on the + corresponding responses. + + All the usual :class:`~.spanner_instance_admin.ListInstancesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[spanner_instance_admin.ListInstancesResponse]], + request: spanner_instance_admin.ListInstancesRequest, + response: spanner_instance_admin.ListInstancesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner_instance_admin.ListInstancesRequest`): + The initial request object. + response (:class:`~.spanner_instance_admin.ListInstancesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = spanner_instance_admin.ListInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[spanner_instance_admin.ListInstancesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[spanner_instance_admin.Instance]: + async def async_generator(): + async for page in self.pages: + for response in page.instances: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/__init__.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/__init__.py new file mode 100644 index 0000000000..2b8e6a24b6 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import InstanceAdminTransport +from .grpc import InstanceAdminGrpcTransport +from .grpc_asyncio import InstanceAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[InstanceAdminTransport]] +_transport_registry["grpc"] = InstanceAdminGrpcTransport +_transport_registry["grpc_asyncio"] = InstanceAdminGrpcAsyncIOTransport + + +__all__ = ( + "InstanceAdminTransport", + "InstanceAdminGrpcTransport", + "InstanceAdminGrpcAsyncIOTransport", +) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py new file mode 100644 index 0000000000..fa07b95eeb --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            "google-cloud-spanner-admin-instance",
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class InstanceAdminTransport(abc.ABC):
+    """Abstract transport class for InstanceAdmin."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/spanner.admin",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.list_instance_configs: gapic_v1.method.wrap_method( + self.list_instance_configs, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.get_instance_config: gapic_v1.method.wrap_method( + self.get_instance_config, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.list_instances: gapic_v1.method.wrap_method( + self.list_instances, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.get_instance: gapic_v1.method.wrap_method( + self.get_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.create_instance: gapic_v1.method.wrap_method( + self.create_instance, default_timeout=3600.0, client_info=client_info, + ), + self.update_instance: gapic_v1.method.wrap_method( + self.update_instance, default_timeout=3600.0, client_info=client_info, + ), + self.delete_instance: gapic_v1.method.wrap_method( + self.delete_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=30.0, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=30.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def list_instance_configs( + self, + ) -> typing.Callable[ + [spanner_instance_admin.ListInstanceConfigsRequest], + typing.Union[ + spanner_instance_admin.ListInstanceConfigsResponse, + typing.Awaitable[spanner_instance_admin.ListInstanceConfigsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_instance_config( + self, + ) -> typing.Callable[ + [spanner_instance_admin.GetInstanceConfigRequest], + typing.Union[ + spanner_instance_admin.InstanceConfig, + typing.Awaitable[spanner_instance_admin.InstanceConfig], + ], + ]: + raise NotImplementedError() + + @property + def list_instances( + self, + ) -> typing.Callable[ + [spanner_instance_admin.ListInstancesRequest], + typing.Union[ + spanner_instance_admin.ListInstancesResponse, + typing.Awaitable[spanner_instance_admin.ListInstancesResponse], + ], 
+ ]: + raise NotImplementedError() + + @property + def get_instance( + self, + ) -> typing.Callable[ + [spanner_instance_admin.GetInstanceRequest], + typing.Union[ + spanner_instance_admin.Instance, + typing.Awaitable[spanner_instance_admin.Instance], + ], + ]: + raise NotImplementedError() + + @property + def create_instance( + self, + ) -> typing.Callable[ + [spanner_instance_admin.CreateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_instance( + self, + ) -> typing.Callable[ + [spanner_instance_admin.UpdateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_instance( + self, + ) -> typing.Callable[ + [spanner_instance_admin.DeleteInstanceRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + [iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("InstanceAdminTransport",) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py new file mode 100644 index 0000000000..a758bb6ad4 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
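To make the wrapped-method table above concrete: each RPC is given a default Retry and timeout, and individual calls can override both. A sketch follows, with hypothetical names and retry values chosen only for illustration.

# Illustrative sketch (hypothetical names): per-call retry/timeout arguments
# override the defaults wired up in _prep_wrapped_messages above.
from google.api_core import exceptions
from google.api_core import retry as retries
from google.cloud import spanner_admin_instance_v1

client = spanner_admin_instance_v1.InstanceAdminClient()

# Uses the wrapped defaults: retry on DeadlineExceeded/ServiceUnavailable,
# 3600 s timeout.
client.get_instance(name="projects/my-project/instances/my-instance")

# Overrides both for this call only.
client.get_instance(
    name="projects/my-project/instances/my-instance",
    retry=retries.Retry(
        initial=0.5,
        maximum=10.0,
        multiplier=2.0,
        predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
    ),
    timeout=60.0,
)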
+# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import InstanceAdminTransport, DEFAULT_CLIENT_INFO + + +class InstanceAdminGrpcTransport(InstanceAdminTransport): + """gRPC backend transport for InstanceAdmin. + + Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, + delete, modify and list instances. Instances are dedicated Cloud + Spanner serving and storage resources to be used by Cloud + Spanner databases. + Each instance has a "configuration", which dictates where the + serving resources for the Cloud Spanner instance are located + (e.g., US-central, Europe). Configurations are created by Google + based on resource availability. + + Cloud Spanner billing is based on the instances that exist and + their sizes. After an instance exists, there are no additional + per-database or per-operation charges for use of the instance + (though there may be additional network bandwidth charges). + Instances offer isolation: problems with databases in one + instance will not affect other instances. However, within an + instance databases can affect each other. For example, if one + database in an instance receives a lot of requests and consumes + most of the instance resources, fewer resources are available + for other databases in that instance, and their performance may + suffer. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. 
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if "operations_client" not in self.__dict__:
+            self.__dict__["operations_client"] = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self.__dict__["operations_client"]
+
+    @property
+    def list_instance_configs(
+        self,
+    ) -> Callable[
+        [spanner_instance_admin.ListInstanceConfigsRequest],
+        spanner_instance_admin.ListInstanceConfigsResponse,
+    ]:
+        r"""Return a callable for the list instance configs method over gRPC.
+
+        Lists the supported instance configurations for a
+        given project.
+
+        Returns:
+            Callable[[~.ListInstanceConfigsRequest],
+                    ~.ListInstanceConfigsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
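A sketch of wiring ``create_channel`` to a transport by hand, assuming Application Default Credentials for channel creation; passing ``channel`` makes the transport ignore credential arguments, as noted in the constructor above.

# Illustrative sketch: a caller-managed channel handed to the transport,
# which is then handed to the client.
from google.cloud import spanner_admin_instance_v1
from google.cloud.spanner_admin_instance_v1.services.instance_admin.transports import (
    InstanceAdminGrpcTransport,
)

channel = InstanceAdminGrpcTransport.create_channel(
    "spanner.googleapis.com",
    scopes=InstanceAdminGrpcTransport.AUTH_SCOPES,
)
transport = InstanceAdminGrpcTransport(channel=channel)
client = spanner_admin_instance_v1.InstanceAdminClient(transport=transport)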
+ if "list_instance_configs" not in self._stubs: + self._stubs["list_instance_configs"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", + request_serializer=spanner_instance_admin.ListInstanceConfigsRequest.serialize, + response_deserializer=spanner_instance_admin.ListInstanceConfigsResponse.deserialize, + ) + return self._stubs["list_instance_configs"] + + @property + def get_instance_config( + self, + ) -> Callable[ + [spanner_instance_admin.GetInstanceConfigRequest], + spanner_instance_admin.InstanceConfig, + ]: + r"""Return a callable for the get instance config method over gRPC. + + Gets information about a particular instance + configuration. + + Returns: + Callable[[~.GetInstanceConfigRequest], + ~.InstanceConfig]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance_config" not in self._stubs: + self._stubs["get_instance_config"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", + request_serializer=spanner_instance_admin.GetInstanceConfigRequest.serialize, + response_deserializer=spanner_instance_admin.InstanceConfig.deserialize, + ) + return self._stubs["get_instance_config"] + + @property + def list_instances( + self, + ) -> Callable[ + [spanner_instance_admin.ListInstancesRequest], + spanner_instance_admin.ListInstancesResponse, + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists all instances in the given project. + + Returns: + Callable[[~.ListInstancesRequest], + ~.ListInstancesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", + request_serializer=spanner_instance_admin.ListInstancesRequest.serialize, + response_deserializer=spanner_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def get_instance( + self, + ) -> Callable[ + [spanner_instance_admin.GetInstanceRequest], spanner_instance_admin.Instance + ]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about a particular instance. + + Returns: + Callable[[~.GetInstanceRequest], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", + request_serializer=spanner_instance_admin.GetInstanceRequest.serialize, + response_deserializer=spanner_instance_admin.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def create_instance( + self, + ) -> Callable[[spanner_instance_admin.CreateInstanceRequest], operations.Operation]: + r"""Return a callable for the create instance method over gRPC. + + Creates an instance and begins preparing it to begin serving. + The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of preparing the new instance. The instance name is + assigned by the caller. If the named instance already exists, + ``CreateInstance`` returns ``ALREADY_EXISTS``. + + Immediately upon completion of this request: + + - The instance is readable via the API, with all requested + attributes but no allocated resources. Its state is + ``CREATING``. + + Until completion of the returned operation: + + - Cancelling the operation renders the instance immediately + unreadable via the API. + - The instance can be deleted. + - All other attempts to modify the instance are rejected. + + Upon completion of the returned operation: + + - Billing for all successfully-allocated resources begins (some + types may have lower than the requested levels). + - Databases can be created in the instance. + - The instance's allocated resource levels are readable via the + API. + - The instance's state becomes ``READY``. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track creation of the instance. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Returns: + Callable[[~.CreateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", + request_serializer=spanner_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def update_instance( + self, + ) -> Callable[[spanner_instance_admin.UpdateInstanceRequest], operations.Operation]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance, and begins allocating or releasing + resources as requested. The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of updating the instance. If the named instance + does not exist, returns ``NOT_FOUND``. + + Immediately upon completion of this request: + + - For resource types for which a decrease in the instance's + allocation has been requested, billing is based on the + newly-requested level. 
+ + Until completion of the returned operation: + + - Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], + and begins restoring resources to their pre-request values. + The operation is guaranteed to succeed at undoing all + resource changes, after which point it terminates with a + ``CANCELLED`` status. + - All other attempts to modify the instance are rejected. + - Reading the instance via the API continues to give the + pre-request resource levels. + + Upon completion of the returned operation: + + - Billing begins for all successfully-allocated resources (some + types may have lower than the requested levels). + - All newly-reserved resources are available for serving the + instance's tables. + - The instance's new resource levels are readable via the API. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track the instance modification. The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Authorization requires ``spanner.instances.update`` permission + on resource + [name][google.spanner.admin.instance.v1.Instance.name]. + + Returns: + Callable[[~.UpdateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", + request_serializer=spanner_instance_admin.UpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[[spanner_instance_admin.DeleteInstanceRequest], empty.Empty]: + r"""Return a callable for the delete instance method over gRPC. + + Deletes an instance. + + Immediately upon completion of the request: + + - Billing ceases for all of the instance's reserved resources. + + Soon afterward: + + - The instance and *all of its databases* immediately and + irrevocably disappear from the API. All data in the databases + is permanently deleted. + + Returns: + Callable[[~.DeleteInstanceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", + request_serializer=spanner_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
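A sketch of driving the ``CreateInstance`` long-running operation described above, with hypothetical names, assuming Application Default Credentials; ``operation.result()`` blocks until the instance reaches ``READY``.

# Illustrative sketch (hypothetical names).
from google.cloud import spanner_admin_instance_v1
from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin

client = spanner_admin_instance_v1.InstanceAdminClient()

operation = client.create_instance(
    parent="projects/my-project",
    instance_id="my-instance",
    instance=spanner_instance_admin.Instance(
        config="projects/my-project/instanceConfigs/regional-us-central1",
        display_name="My Instance",
        node_count=1,
    ),
)
instance = operation.result()  # blocks until the LRO completes
print(instance.state)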
+ + Sets the access control policy on an instance resource. Replaces + any existing policy. + + Authorization requires ``spanner.instances.setIamPolicy`` on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance resource. Returns + an empty policy if an instance exists but does not have a policy + set. + + Authorization requires ``spanner.instances.getIamPolicy`` on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the specified + instance resource. + + Attempting this RPC on a non-existent Cloud Spanner instance + resource will result in a NOT_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google + Cloud Project. Otherwise returns an empty set of permissions. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
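A sketch of the read-modify-write cycle these IAM stubs serve, with hypothetical names, assuming Application Default Credentials.

# Illustrative sketch: fetch the policy, add a binding, write it back.
from google.cloud import spanner_admin_instance_v1
from google.iam.v1 import iam_policy_pb2

client = spanner_admin_instance_v1.InstanceAdminClient()
resource = "projects/my-project/instances/my-instance"

policy = client.get_iam_policy(resource=resource)
policy.bindings.add(
    role="roles/spanner.databaseReader",
    members=["user:someone@example.com"],
)
client.set_iam_policy(
    request=iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
)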
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("InstanceAdminGrpcTransport",) diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py new file mode 100644 index 0000000000..91fb40d1e7 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -0,0 +1,663 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import InstanceAdminTransport, DEFAULT_CLIENT_INFO +from .grpc import InstanceAdminGrpcTransport + + +class InstanceAdminGrpcAsyncIOTransport(InstanceAdminTransport): + """gRPC AsyncIO backend transport for InstanceAdmin. + + Cloud Spanner Instance Admin API + The Cloud Spanner Instance Admin API can be used to create, + delete, modify and list instances. Instances are dedicated Cloud + Spanner serving and storage resources to be used by Cloud + Spanner databases. + Each instance has a "configuration", which dictates where the + serving resources for the Cloud Spanner instance are located + (e.g., US-central, Europe). Configurations are created by Google + based on resource availability. + + Cloud Spanner billing is based on the instances that exist and + their sizes. After an instance exists, there are no additional + per-database or per-operation charges for use of the instance + (though there may be additional network bandwidth charges). + Instances offer isolation: problems with databases in one + instance will not affect other instances. However, within an + instance databases can affect each other. 
For example, if one
+    database in an instance receives a lot of requests and consumes
+    most of the instance resources, fewer resources are available
+    for other databases in that instance, and their performance may
+    suffer.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "spanner.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+        self._stubs = {}
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
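A sketch of consuming this AsyncIO transport through the async client, with hypothetical names, assuming Application Default Credentials and a running event loop.

# Illustrative sketch.
import asyncio

from google.cloud.spanner_admin_instance_v1.services.instance_admin import (
    InstanceAdminAsyncClient,
)


async def main():
    client = InstanceAdminAsyncClient()
    instance = await client.get_instance(
        name="projects/my-project/instances/my-instance"
    )
    print(instance.display_name)


asyncio.run(main())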
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def list_instance_configs( + self, + ) -> Callable[ + [spanner_instance_admin.ListInstanceConfigsRequest], + Awaitable[spanner_instance_admin.ListInstanceConfigsResponse], + ]: + r"""Return a callable for the list instance configs method over gRPC. + + Lists the supported instance configurations for a + given project. + + Returns: + Callable[[~.ListInstanceConfigsRequest], + Awaitable[~.ListInstanceConfigsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instance_configs" not in self._stubs: + self._stubs["list_instance_configs"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", + request_serializer=spanner_instance_admin.ListInstanceConfigsRequest.serialize, + response_deserializer=spanner_instance_admin.ListInstanceConfigsResponse.deserialize, + ) + return self._stubs["list_instance_configs"] + + @property + def get_instance_config( + self, + ) -> Callable[ + [spanner_instance_admin.GetInstanceConfigRequest], + Awaitable[spanner_instance_admin.InstanceConfig], + ]: + r"""Return a callable for the get instance config method over gRPC. + + Gets information about a particular instance + configuration. + + Returns: + Callable[[~.GetInstanceConfigRequest], + Awaitable[~.InstanceConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance_config" not in self._stubs: + self._stubs["get_instance_config"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", + request_serializer=spanner_instance_admin.GetInstanceConfigRequest.serialize, + response_deserializer=spanner_instance_admin.InstanceConfig.deserialize, + ) + return self._stubs["get_instance_config"] + + @property + def list_instances( + self, + ) -> Callable[ + [spanner_instance_admin.ListInstancesRequest], + Awaitable[spanner_instance_admin.ListInstancesResponse], + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists all instances in the given project. + + Returns: + Callable[[~.ListInstancesRequest], + Awaitable[~.ListInstancesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", + request_serializer=spanner_instance_admin.ListInstancesRequest.serialize, + response_deserializer=spanner_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def get_instance( + self, + ) -> Callable[ + [spanner_instance_admin.GetInstanceRequest], + Awaitable[spanner_instance_admin.Instance], + ]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about a particular instance. + + Returns: + Callable[[~.GetInstanceRequest], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", + request_serializer=spanner_instance_admin.GetInstanceRequest.serialize, + response_deserializer=spanner_instance_admin.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def create_instance( + self, + ) -> Callable[ + [spanner_instance_admin.CreateInstanceRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create instance method over gRPC. + + Creates an instance and begins preparing it to begin serving. + The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of preparing the new instance. The instance name is + assigned by the caller. If the named instance already exists, + ``CreateInstance`` returns ``ALREADY_EXISTS``. + + Immediately upon completion of this request: + + - The instance is readable via the API, with all requested + attributes but no allocated resources. Its state is + ``CREATING``. + + Until completion of the returned operation: + + - Cancelling the operation renders the instance immediately + unreadable via the API. + - The instance can be deleted. + - All other attempts to modify the instance are rejected. + + Upon completion of the returned operation: + + - Billing for all successfully-allocated resources begins (some + types may have lower than the requested levels). + - Databases can be created in the instance. + - The instance's allocated resource levels are readable via the + API. + - The instance's state becomes ``READY``. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track creation of the instance. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Returns: + Callable[[~.CreateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", + request_serializer=spanner_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def update_instance( + self, + ) -> Callable[ + [spanner_instance_admin.UpdateInstanceRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance, and begins allocating or releasing + resources as requested. The returned [long-running + operation][google.longrunning.Operation] can be used to track + the progress of updating the instance. If the named instance + does not exist, returns ``NOT_FOUND``. + + Immediately upon completion of this request: + + - For resource types for which a decrease in the instance's + allocation has been requested, billing is based on the + newly-requested level. + + Until completion of the returned operation: + + - Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], + and begins restoring resources to their pre-request values. + The operation is guaranteed to succeed at undoing all + resource changes, after which point it terminates with a + ``CANCELLED`` status. + - All other attempts to modify the instance are rejected. + - Reading the instance via the API continues to give the + pre-request resource levels. + + Upon completion of the returned operation: + + - Billing begins for all successfully-allocated resources (some + types may have lower than the requested levels). + - All newly-reserved resources are available for serving the + instance's tables. + - The instance's new resource levels are readable via the API. + + The returned [long-running + operation][google.longrunning.Operation] will have a name of the + format ``/operations/`` and can be + used to track the instance modification. The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. + + Authorization requires ``spanner.instances.update`` permission + on resource + [name][google.spanner.admin.instance.v1.Instance.name]. + + Returns: + Callable[[~.UpdateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", + request_serializer=spanner_instance_admin.UpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[ + [spanner_instance_admin.DeleteInstanceRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete instance method over gRPC. + + Deletes an instance. 
+ + Immediately upon completion of the request: + + - Billing ceases for all of the instance's reserved resources. + + Soon afterward: + + - The instance and *all of its databases* immediately and + irrevocably disappear from the API. All data in the databases + is permanently deleted. + + Returns: + Callable[[~.DeleteInstanceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", + request_serializer=spanner_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance resource. Replaces + any existing policy. + + Authorization requires ``spanner.instances.setIamPolicy`` on + [resource][google.iam.v1.SetIamPolicyRequest.resource]. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance resource. Returns + an empty policy if an instance exists but does not have a policy + set. + + Authorization requires ``spanner.instances.getIamPolicy`` on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the specified + instance resource. 
+ + Attempting this RPC on a non-existent Cloud Spanner instance + resource will result in a NOT_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google + Cloud Project. Otherwise returns an empty set of permissions. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("InstanceAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/spanner_admin_instance_v1/types.py b/google/cloud/spanner_admin_instance_v1/types.py deleted file mode 100644 index a20b479bf0..0000000000 --- a/google/cloud/spanner_admin_instance_v1/types.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -import sys - - -from google.api import http_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.iam.v1.logging import audit_data_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2 - - -_shared_modules = [ - http_pb2, - iam_policy_pb2, - policy_pb2, - audit_data_pb2, - operations_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [spanner_instance_admin_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_admin_instance_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py new file mode 100644 index 0000000000..0f096f84c9 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .spanner_instance_admin import ( + ReplicaInfo, + InstanceConfig, + Instance, + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + GetInstanceConfigRequest, + GetInstanceRequest, + CreateInstanceRequest, + ListInstancesRequest, + ListInstancesResponse, + UpdateInstanceRequest, + DeleteInstanceRequest, + CreateInstanceMetadata, + UpdateInstanceMetadata, +) + + +__all__ = ( + "ReplicaInfo", + "InstanceConfig", + "Instance", + "ListInstanceConfigsRequest", + "ListInstanceConfigsResponse", + "GetInstanceConfigRequest", + "GetInstanceRequest", + "CreateInstanceRequest", + "ListInstancesRequest", + "ListInstancesResponse", + "UpdateInstanceRequest", + "DeleteInstanceRequest", + "CreateInstanceMetadata", + "UpdateInstanceMetadata", +) diff --git a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py new file mode 100644 index 0000000000..cf2dc11a33 --- /dev/null +++ b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.spanner.admin.instance.v1",
+ manifest={
+ "ReplicaInfo",
+ "InstanceConfig",
+ "Instance",
+ "ListInstanceConfigsRequest",
+ "ListInstanceConfigsResponse",
+ "GetInstanceConfigRequest",
+ "GetInstanceRequest",
+ "CreateInstanceRequest",
+ "ListInstancesRequest",
+ "ListInstancesResponse",
+ "UpdateInstanceRequest",
+ "DeleteInstanceRequest",
+ "CreateInstanceMetadata",
+ "UpdateInstanceMetadata",
+ },
+)
+
+
+class ReplicaInfo(proto.Message):
+ r"""
+
+ Attributes:
+ location (str):
+ The location of the serving resources, e.g.
+ "us-central1".
+ type_ (~.spanner_instance_admin.ReplicaInfo.ReplicaType):
+ The type of replica.
+ default_leader_location (bool):
+ If true, this location is designated as the default leader
+ location where leader replicas are placed. See the `region
+ types
+ documentation <https://cloud.google.com/spanner/docs/instances#region_configurations>`__
+ for more details.
+ """
+
+ class ReplicaType(proto.Enum):
+ r"""Indicates the type of replica. See the `replica types
+ documentation <https://cloud.google.com/spanner/docs/replication#replica_types>`__
+ for more details.
+ """
+ TYPE_UNSPECIFIED = 0
+ READ_WRITE = 1
+ READ_ONLY = 2
+ WITNESS = 3
+
+ location = proto.Field(proto.STRING, number=1)
+
+ type_ = proto.Field(proto.ENUM, number=2, enum=ReplicaType,)
+
+ default_leader_location = proto.Field(proto.BOOL, number=3)
+
+
+class InstanceConfig(proto.Message):
+ r"""A possible configuration for a Cloud Spanner instance.
+ Configurations define the geographic placement of nodes and
+ their replication.
+
+ Attributes:
+ name (str):
+ A unique identifier for the instance configuration. Values
+ are of the form
+ ``projects/<project>/instanceConfigs/[a-z][-a-z0-9]*``
+ display_name (str):
+ The name of this instance configuration as it
+ appears in UIs.
+ replicas (Sequence[~.spanner_instance_admin.ReplicaInfo]):
+ The geographic placement of nodes in this
+ instance configuration and their replication
+ properties.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ replicas = proto.RepeatedField(proto.MESSAGE, number=3, message="ReplicaInfo",)
+
+
+class Instance(proto.Message):
+ r"""An isolated set of Cloud Spanner resources on which databases
+ can be hosted.
+
+ Attributes:
+ name (str):
+ Required. A unique identifier for the instance, which cannot
+ be changed after the instance is created. Values are of the
+ form
+ ``projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]``.
+ The final segment of the name must be between 2 and 64
+ characters in length.
+ config (str):
+ Required. The name of the instance's configuration. Values
+ are of the form
+ ``projects/<project>/instanceConfigs/<configuration>``. See
+ also
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
+ and
+ [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ display_name (str):
+ Required. The descriptive name for this
+ instance as it appears in UIs.
Must be unique
+ per project and between 4 and 30 characters in
+ length.
+ node_count (int):
+ Required. The number of nodes allocated to this instance.
+ This may be zero in API responses for instances that are not
+ yet in state ``READY``.
+
+ See `the
+ documentation <https://cloud.google.com/spanner/docs/instances#node_count>`__
+ for more information about nodes.
+ state (~.spanner_instance_admin.Instance.State):
+ Output only. The current instance state. For
+ [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
+ the state must be either omitted or set to ``CREATING``. For
+ [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
+ the state must be either omitted or set to ``READY``.
+ labels (Sequence[~.spanner_instance_admin.Instance.LabelsEntry]):
+ Cloud Labels are a flexible and lightweight mechanism for
+ organizing cloud resources into groups that reflect a
+ customer's organizational needs and deployment strategies.
+ Cloud Labels can be used to filter collections of resources.
+ They can be used to control how resource metrics are
+ aggregated. And they can be used as arguments to policy
+ management rules (e.g. route, firewall, load balancing,
+ etc.).
+
+ - Label keys must be between 1 and 63 characters long and
+ must conform to the following regular expression:
+ ``[a-z]([-a-z0-9]*[a-z0-9])?``.
+ - Label values must be between 0 and 63 characters long and
+ must conform to the regular expression
+ ``([a-z]([-a-z0-9]*[a-z0-9])?)?``.
+ - No more than 64 labels can be associated with a given
+ resource.
+
+ See https://goo.gl/xmQnxf for more information on and
+ examples of labels.
+
+ If you plan to use labels in your own code, please note that
+ additional characters may be allowed in the future. And so
+ you are advised to use an internal label representation,
+ such as JSON, which doesn't rely upon specific characters
+ being disallowed. For example, representing labels as the
+ string: name + "*" + value would prove problematic if we
+ were to allow "*" in a future release.
+ endpoint_uris (Sequence[str]):
+ Deprecated. This field is not populated.
+ """
+
+ class State(proto.Enum):
+ r"""Indicates the current state of the instance."""
+ STATE_UNSPECIFIED = 0
+ CREATING = 1
+ READY = 2
+
+ name = proto.Field(proto.STRING, number=1)
+
+ config = proto.Field(proto.STRING, number=2)
+
+ display_name = proto.Field(proto.STRING, number=3)
+
+ node_count = proto.Field(proto.INT32, number=5)
+
+ state = proto.Field(proto.ENUM, number=6, enum=State,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=7)
+
+ endpoint_uris = proto.RepeatedField(proto.STRING, number=8)
+
+
+class ListInstanceConfigsRequest(proto.Message):
+ r"""The request for
+ [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+
+ Attributes:
+ parent (str):
+ Required. The name of the project for which a list of
+ supported instance configurations is requested. Values are
+ of the form ``projects/<project>``.
+ page_size (int):
+ Number of instance configurations to be
+ returned in the response. If 0 or less, defaults
+ to the server's maximum allowed page size.
+ page_token (str):
+ If non-empty, ``page_token`` should contain a
+ [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
+ from a previous
+ [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
+ """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListInstanceConfigsResponse(proto.Message): + r"""The response for + [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + + Attributes: + instance_configs (Sequence[~.spanner_instance_admin.InstanceConfig]): + The list of requested instance + configurations. + next_page_token (str): + ``next_page_token`` can be sent in a subsequent + [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] + call to fetch more of the matching instance configurations. + """ + + @property + def raw_page(self): + return self + + instance_configs = proto.RepeatedField( + proto.MESSAGE, number=1, message="InstanceConfig", + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetInstanceConfigRequest(proto.Message): + r"""The request for + [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. + + Attributes: + name (str): + Required. The name of the requested instance configuration. + Values are of the form + ``projects//instanceConfigs/``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class GetInstanceRequest(proto.Message): + r"""The request for + [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. + + Attributes: + name (str): + Required. The name of the requested instance. Values are of + the form ``projects//instances/``. + field_mask (~.gp_field_mask.FieldMask): + If field_mask is present, specifies the subset of + [Instance][google.spanner.admin.instance.v1.Instance] fields + that should be returned. If absent, all + [Instance][google.spanner.admin.instance.v1.Instance] fields + are returned. + """ + + name = proto.Field(proto.STRING, number=1) + + field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,) + + +class CreateInstanceRequest(proto.Message): + r"""The request for + [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. + + Attributes: + parent (str): + Required. The name of the project in which to create the + instance. Values are of the form ``projects/``. + instance_id (str): + Required. The ID of the instance to create. Valid + identifiers are of the form ``[a-z][-a-z0-9]*[a-z0-9]`` and + must be between 2 and 64 characters in length. + instance (~.spanner_instance_admin.Instance): + Required. The instance to create. The name may be omitted, + but if specified must be + ``/instances/``. + """ + + parent = proto.Field(proto.STRING, number=1) + + instance_id = proto.Field(proto.STRING, number=2) + + instance = proto.Field(proto.MESSAGE, number=3, message="Instance",) + + +class ListInstancesRequest(proto.Message): + r"""The request for + [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. + + Attributes: + parent (str): + Required. The name of the project for which a list of + instances is requested. Values are of the form + ``projects/``. + page_size (int): + Number of instances to be returned in the + response. If 0 or less, defaults to the server's + maximum allowed page size. + page_token (str): + If non-empty, ``page_token`` should contain a + [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] + from a previous + [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. 
+ filter (str):
+ An expression for filtering the results of the request.
+ Filter rules are case insensitive. The fields eligible for
+ filtering are:
+
+ - ``name``
+ - ``display_name``
+ - ``labels.key`` where key is the name of a label
+
+ Some examples of using filters are:
+
+ - ``name:*`` --> The instance has a name.
+ - ``name:Howl`` --> The instance's name contains the string
+ "howl".
+ - ``name:HOWL`` --> Equivalent to above.
+ - ``NAME:howl`` --> Equivalent to above.
+ - ``labels.env:*`` --> The instance has the label "env".
+ - ``labels.env:dev`` --> The instance has the label "env"
+ and the value of the label contains the string "dev".
+ - ``name:howl labels.env:dev`` --> The instance's name
+ contains "howl" and it has the label "env" with its value
+ containing "dev".
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ page_size = proto.Field(proto.INT32, number=2)
+
+ page_token = proto.Field(proto.STRING, number=3)
+
+ filter = proto.Field(proto.STRING, number=4)
+
+
+class ListInstancesResponse(proto.Message):
+ r"""The response for
+ [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+
+ Attributes:
+ instances (Sequence[~.spanner_instance_admin.Instance]):
+ The list of requested instances.
+ next_page_token (str):
+ ``next_page_token`` can be sent in a subsequent
+ [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
+ call to fetch more of the matching instances.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ instances = proto.RepeatedField(proto.MESSAGE, number=1, message="Instance",)
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class UpdateInstanceRequest(proto.Message):
+ r"""The request for
+ [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+
+ Attributes:
+ instance (~.spanner_instance_admin.Instance):
+ Required. The instance to update, which must always include
+ the instance name. Otherwise, only fields mentioned in
+ [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
+ need be included.
+ field_mask (~.gp_field_mask.FieldMask):
+ Required. A mask specifying which fields in
+ [Instance][google.spanner.admin.instance.v1.Instance] should
+ be updated. The field mask must always be specified; this
+ prevents any future fields in
+ [Instance][google.spanner.admin.instance.v1.Instance] from
+ being erased accidentally by clients that do not know about
+ them.
+ """
+
+ instance = proto.Field(proto.MESSAGE, number=1, message="Instance",)
+
+ field_mask = proto.Field(proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,)
+
+
+class DeleteInstanceRequest(proto.Message):
+ r"""The request for
+ [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+
+ Attributes:
+ name (str):
+ Required. The name of the instance to be deleted. Values are
+ of the form ``projects/<project>/instances/<instance>``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CreateInstanceMetadata(proto.Message):
+ r"""Metadata type for the operation returned by
+ [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+
+ Attributes:
+ instance (~.spanner_instance_admin.Instance):
+ The instance being created.
+ start_time (~.timestamp.Timestamp):
+ The time at which the
+ [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
+ request was received.
+ cancel_time (~.timestamp.Timestamp):
+ The time at which this operation was
+ cancelled.
If set, this operation is in the + process of undoing itself (which is guaranteed + to succeed) and cannot be cancelled again. + end_time (~.timestamp.Timestamp): + The time at which this operation failed or + was completed successfully. + """ + + instance = proto.Field(proto.MESSAGE, number=1, message="Instance",) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + cancel_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class UpdateInstanceMetadata(proto.Message): + r"""Metadata type for the operation returned by + [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. + + Attributes: + instance (~.spanner_instance_admin.Instance): + The desired end state of the update. + start_time (~.timestamp.Timestamp): + The time at which + [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] + request was received. + cancel_time (~.timestamp.Timestamp): + The time at which this operation was + cancelled. If set, this operation is in the + process of undoing itself (which is guaranteed + to succeed) and cannot be cancelled again. + end_time (~.timestamp.Timestamp): + The time at which this operation failed or + was completed successfully. + """ + + instance = proto.Field(proto.MESSAGE, number=1, message="Instance",) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + cancel_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/__init__.py b/google/cloud/spanner_v1/__init__.py index 8611405cd6..a6e8b6b6bf 100644 --- a/google/cloud/spanner_v1/__init__.py +++ b/google/cloud/spanner_v1/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017, Google LLC All rights reserved. +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,17 +13,53 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
+#
from __future__ import absolute_import
import pkg_resources
__version__ = pkg_resources.get_distribution("google-cloud-spanner").version
+from .services.spanner import SpannerClient
+from .types.keys import KeyRange as KeyRangePB
+from .types.keys import KeySet as KeySetPB
+from .types.mutation import Mutation
+from .types.query_plan import PlanNode
+from .types.query_plan import QueryPlan
+from .types.result_set import PartialResultSet
+from .types.result_set import ResultSet
+from .types.result_set import ResultSetMetadata
+from .types.result_set import ResultSetStats
+from .types.spanner import BatchCreateSessionsRequest
+from .types.spanner import BatchCreateSessionsResponse
+from .types.spanner import BeginTransactionRequest
+from .types.spanner import CommitRequest
+from .types.spanner import CommitResponse
+from .types.spanner import CreateSessionRequest
+from .types.spanner import DeleteSessionRequest
+from .types.spanner import ExecuteBatchDmlRequest
+from .types.spanner import ExecuteBatchDmlResponse
+from .types.spanner import ExecuteSqlRequest
+from .types.spanner import GetSessionRequest
+from .types.spanner import ListSessionsRequest
+from .types.spanner import ListSessionsResponse
+from .types.spanner import Partition
+from .types.spanner import PartitionOptions
+from .types.spanner import PartitionQueryRequest
+from .types.spanner import PartitionReadRequest
+from .types.spanner import PartitionResponse
+from .types.spanner import ReadRequest
+from .types.spanner import RollbackRequest
+from .types.spanner import Session
+from .types.transaction import Transaction
+from .types.transaction import TransactionOptions
+from .types.transaction import TransactionSelector
+from .types.type import StructType
+from .types.type import Type
+from .types.type import TypeCode
+
from google.cloud.spanner_v1 import param_types
-from google.cloud.spanner_v1 import types
from google.cloud.spanner_v1.client import Client
-from google.cloud.spanner_v1.gapic import enums
from google.cloud.spanner_v1.keyset import KeyRange
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.pool import AbstractSessionPool
@@ -33,7 +71,6 @@
COMMIT_TIMESTAMP = "spanner.commit_timestamp()"
"""Placeholder to be used to store commit timestamp of a transaction in a
column.
-
This value can only be used for timestamp columns that have set the
option ``(allow_commit_timestamp=true)`` in the schema.
""" @@ -43,7 +80,6 @@ # google.cloud.spanner_v1 "__version__", "param_types", - "types", # google.cloud.spanner_v1.client "Client", # google.cloud.spanner_v1.keyset @@ -55,8 +91,45 @@ "FixedSizePool", "PingingPool", "TransactionPingingPool", - # google.cloud.spanner_v1.gapic - "enums", # local "COMMIT_TIMESTAMP", + # google.cloud.spanner_v1.types + "BatchCreateSessionsRequest", + "BatchCreateSessionsResponse", + "BeginTransactionRequest", + "CommitRequest", + "CommitResponse", + "CreateSessionRequest", + "DeleteSessionRequest", + "ExecuteBatchDmlRequest", + "ExecuteBatchDmlResponse", + "ExecuteSqlRequest", + "GetSessionRequest", + "KeyRangePB", + "KeySetPB", + "ListSessionsRequest", + "ListSessionsResponse", + "Mutation", + "PartialResultSet", + "Partition", + "PartitionOptions", + "PartitionQueryRequest", + "PartitionReadRequest", + "PartitionResponse", + "PlanNode", + "QueryPlan", + "ReadRequest", + "ResultSet", + "ResultSetMetadata", + "ResultSetStats", + "RollbackRequest", + "Session", + "StructType", + "Transaction", + "TransactionOptions", + "TransactionSelector", + "Type", + "TypeCode", + # google.cloud.spanner_v1.services + "SpannerClient", ) diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py index 6437c65e7f..4ac13f7c6b 100644 --- a/google/cloud/spanner_v1/_helpers.py +++ b/google/cloud/spanner_v1/_helpers.py @@ -26,8 +26,8 @@ from google.api_core import datetime_helpers from google.cloud._helpers import _date_from_iso8601_date from google.cloud._helpers import _datetime_to_rfc3339 -from google.cloud.spanner_v1.proto import type_pb2 -from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest +from google.cloud.spanner_v1 import TypeCode +from google.cloud.spanner_v1 import ExecuteSqlRequest def _try_to_coerce_bytes(bytestring): @@ -53,19 +53,19 @@ def _merge_query_options(base, merge): """Merge higher precedence QueryOptions with current QueryOptions. :type base: - :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` or None :param base: The current QueryOptions that is intended for use. :type merge: - :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` or None :param merge: The QueryOptions that have a higher priority than base. These options should overwrite the fields in base. :rtype: - :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or None :returns: QueryOptions object formed by merging the two given QueryOptions. 
@@ -81,7 +81,7 @@ def _merge_query_options(base, merge): merge = ExecuteSqlRequest.QueryOptions( optimizer_version=merge.get("optimizer_version", "") ) - combined.MergeFrom(merge) + type(combined).pb(combined).MergeFrom(type(merge).pb(merge)) if not combined.optimizer_version: return None return combined @@ -161,13 +161,48 @@ def _make_list_value_pbs(values): # pylint: disable=too-many-branches +def _parse_value(value, field_type): + if value is None: + return None + if field_type.code == TypeCode.STRING: + result = value + elif field_type.code == TypeCode.BYTES: + result = value.encode("utf8") + elif field_type.code == TypeCode.BOOL: + result = value + elif field_type.code == TypeCode.INT64: + result = int(value) + elif field_type.code == TypeCode.FLOAT64: + if isinstance(value, str): + result = float(value) + else: + result = value + elif field_type.code == TypeCode.DATE: + result = _date_from_iso8601_date(value) + elif field_type.code == TypeCode.TIMESTAMP: + DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds + result = DatetimeWithNanoseconds.from_rfc3339(value) + elif field_type.code == TypeCode.ARRAY: + result = [_parse_value(item, field_type.array_element_type) for item in value] + elif field_type.code == TypeCode.STRUCT: + result = [ + _parse_value(item, field_type.struct_type.fields[i].type_) + for (i, item) in enumerate(value) + ] + elif field_type.code == TypeCode.NUMERIC: + result = decimal.Decimal(value) + else: + raise ValueError("Unknown type: %s" % (field_type,)) + return result + + def _parse_value_pb(value_pb, field_type): """Convert a Value protobuf to cell data. :type value_pb: :class:`~google.protobuf.struct_pb2.Value` :param value_pb: protobuf to convert - :type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type` + :type field_type: :class:`~google.cloud.spanner_v1.Type` :param field_type: type code for the value :rtype: varies on field_type @@ -176,39 +211,15 @@ def _parse_value_pb(value_pb, field_type): """ if value_pb.HasField("null_value"): return None - if field_type.code == type_pb2.STRING: - result = value_pb.string_value - elif field_type.code == type_pb2.BYTES: - result = value_pb.string_value.encode("utf8") - elif field_type.code == type_pb2.BOOL: - result = value_pb.bool_value - elif field_type.code == type_pb2.INT64: - result = int(value_pb.string_value) - elif field_type.code == type_pb2.FLOAT64: - if value_pb.HasField("string_value"): - result = float(value_pb.string_value) - else: - result = value_pb.number_value - elif field_type.code == type_pb2.DATE: - result = _date_from_iso8601_date(value_pb.string_value) - elif field_type.code == type_pb2.TIMESTAMP: - DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds - result = DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value) - elif field_type.code == type_pb2.ARRAY: - result = [ - _parse_value_pb(item_pb, field_type.array_element_type) - for item_pb in value_pb.list_value.values - ] - elif field_type.code == type_pb2.STRUCT: - result = [ - _parse_value_pb(item_pb, field_type.struct_type.fields[i].type) - for (i, item_pb) in enumerate(value_pb.list_value.values) - ] - elif field_type.code == type_pb2.NUMERIC: - result = decimal.Decimal(value_pb.string_value) - else: - raise ValueError("Unknown type: %s" % (field_type,)) - return result + if value_pb.HasField("string_value"): + return _parse_value(value_pb.string_value, field_type) + if value_pb.HasField("bool_value"): + return _parse_value(value_pb.bool_value, field_type) + if 
value_pb.HasField("number_value"): + return _parse_value(value_pb.number_value, field_type) + if value_pb.HasField("list_value"): + return _parse_value(value_pb.list_value, field_type) + raise ValueError("No value set in Value: %s" % (value_pb,)) # pylint: enable=too-many-branches @@ -220,7 +231,7 @@ def _parse_list_value_pbs(rows, row_type): :type rows: list of :class:`~google.protobuf.struct_pb2.ListValue` :param rows: row data returned from a read/query - :type row_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.StructType` + :type row_type: :class:`~google.cloud.spanner_v1.StructType` :param row_type: row schema specification :rtype: list of list of cell data @@ -230,7 +241,7 @@ def _parse_list_value_pbs(rows, row_type): for row in rows: row_data = [] for value_pb, field in zip(row.values, row_type.fields): - row_data.append(_parse_value_pb(value_pb, field.type)) + row_data.append(_parse_value_pb(value_pb, field.type_)) result.append(row_data) return result diff --git a/google/cloud/spanner_v1/_opentelemetry_tracing.py b/google/cloud/spanner_v1/_opentelemetry_tracing.py index 60e68598e9..71ac518992 100644 --- a/google/cloud/spanner_v1/_opentelemetry_tracing.py +++ b/google/cloud/spanner_v1/_opentelemetry_tracing.py @@ -17,7 +17,7 @@ from contextlib import contextmanager from google.api_core.exceptions import GoogleAPICallError -from google.cloud.spanner_v1.gapic import spanner_client +from google.cloud.spanner_v1 import SpannerClient try: from opentelemetry import trace @@ -41,9 +41,9 @@ def trace_call(name, session, extra_attributes=None): # Set base attributes that we know for every trace created attributes = { "db.type": "spanner", - "db.url": spanner_client.SpannerClient.SERVICE_ADDRESS, + "db.url": SpannerClient.DEFAULT_ENDPOINT, "db.instance": session._database.name, - "net.host.name": spanner_client.SpannerClient.SERVICE_ADDRESS, + "net.host.name": SpannerClient.DEFAULT_ENDPOINT, } if extra_attributes: diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py index 2aaa1c0f5c..405a9e2be2 100644 --- a/google/cloud/spanner_v1/backup.py +++ b/google/cloud/spanner_v1/backup.py @@ -16,10 +16,9 @@ import re -from google.cloud._helpers import _datetime_to_pb_timestamp, _pb_timestamp_to_datetime from google.cloud.exceptions import NotFound -from google.cloud.spanner_admin_database_v1.gapic import enums +from google.cloud.spanner_admin_database_v1 import Backup as BackupPB from google.cloud.spanner_v1._helpers import _metadata_with_prefix _BACKUP_NAME_RE = re.compile( @@ -123,7 +122,7 @@ def size_bytes(self): def state(self): """State of this backup. 
- :rtype: :class:`~google.cloud.spanner_admin_database_v1.gapic.enums.Backup.State` + :rtype: :class:`~google.cloud.spanner_admin_database_v1.Backup.State` :returns: an enum describing the state of the backup """ return self._state @@ -191,13 +190,13 @@ def create(self): raise ValueError("database not set") api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - backup = { - "database": self._database, - "expire_time": _datetime_to_pb_timestamp(self.expire_time), - } + backup = BackupPB(database=self._database, expire_time=self.expire_time,) future = api.create_backup( - self._instance.name, self.backup_id, backup, metadata=metadata + parent=self._instance.name, + backup_id=self.backup_id, + backup=backup, + metadata=metadata, ) return future @@ -211,7 +210,7 @@ def exists(self): metadata = _metadata_with_prefix(self.name) try: - api.get_backup(self.name, metadata=metadata) + api.get_backup(name=self.name, metadata=metadata) except NotFound: return False return True @@ -225,12 +224,12 @@ def reload(self): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - pb = api.get_backup(self.name, metadata=metadata) + pb = api.get_backup(name=self.name, metadata=metadata) self._database = pb.database - self._expire_time = _pb_timestamp_to_datetime(pb.expire_time) - self._create_time = _pb_timestamp_to_datetime(pb.create_time) + self._expire_time = pb.expire_time + self._create_time = pb.create_time self._size_bytes = pb.size_bytes - self._state = enums.Backup.State(pb.state) + self._state = BackupPB.State(pb.state) self._referencing_databases = pb.referencing_databases def update_expire_time(self, new_expire_time): @@ -241,12 +240,11 @@ def update_expire_time(self, new_expire_time): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - backup_update = { - "name": self.name, - "expire_time": _datetime_to_pb_timestamp(new_expire_time), - } + backup_update = BackupPB(name=self.name, expire_time=new_expire_time,) update_mask = {"paths": ["expire_time"]} - api.update_backup(backup_update, update_mask, metadata=metadata) + api.update_backup( + backup=backup_update, update_mask=update_mask, metadata=metadata + ) self._expire_time = new_expire_time def is_ready(self): @@ -255,21 +253,10 @@ def is_ready(self): :rtype: bool :returns: True if the backup state is READY, else False. 
""" - return self.state == enums.Backup.State.READY + return self.state == BackupPB.State.READY def delete(self): """Delete this backup.""" api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - api.delete_backup(self.name, metadata=metadata) - - -class BackupInfo(object): - def __init__(self, backup, create_time, source_database): - self.backup = backup - self.create_time = _pb_timestamp_to_datetime(create_time) - self.source_database = source_database - - @classmethod - def from_pb(cls, pb): - return cls(pb.backup, pb.create_time, pb.source_database) + api.delete_backup(name=self.name, metadata=metadata) diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py index 7ab394b215..27cd3c8b58 100644 --- a/google/cloud/spanner_v1/batch.py +++ b/google/cloud/spanner_v1/batch.py @@ -14,11 +14,10 @@ """Context manager for Cloud Spanner batched writes.""" -from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions +from google.cloud.spanner_v1 import Mutation +from google.cloud.spanner_v1 import TransactionOptions # pylint: disable=ungrouped-imports -from google.cloud._helpers import _pb_timestamp_to_datetime from google.cloud.spanner_v1._helpers import _SessionWrapper from google.cloud.spanner_v1._helpers import _make_list_value_pbs from google.cloud.spanner_v1._helpers import _metadata_with_prefix @@ -151,12 +150,12 @@ def commit(self): trace_attributes = {"num_mutations": len(self._mutations)} with trace_call("CloudSpanner.Commit", self._session, trace_attributes): response = api.commit( - self._session.name, + session=self._session.name, mutations=self._mutations, single_use_transaction=txn_options, metadata=metadata, ) - self.committed = _pb_timestamp_to_datetime(response.commit_timestamp) + self.committed = response.commit_timestamp return self.committed def __enter__(self): @@ -183,7 +182,7 @@ def _make_write_pb(table, columns, values): :type values: list of lists :param values: Values to be modified. 
- :rtype: :class:`google.cloud.spanner_v1.proto.mutation_pb2.Mutation.Write` + :rtype: :class:`google.cloud.spanner_v1.Mutation.Write` :returns: Write protobuf """ return Mutation.Write( diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index e669fe8d27..b433f0c7b0 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -31,22 +31,19 @@ from google.auth.credentials import AnonymousCredentials import google.api_core.client_options -from google.cloud.spanner_admin_instance_v1.gapic.transports import ( - instance_admin_grpc_transport, -) +# pylint: disable=line-too-long -from google.cloud.spanner_admin_database_v1.gapic.transports import ( - database_admin_grpc_transport, +from google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.grpc import ( + InstanceAdminGrpcTransport, ) -# pylint: disable=line-too-long -from google.cloud.spanner_admin_database_v1.gapic.database_admin_client import ( # noqa - DatabaseAdminClient, -) -from google.cloud.spanner_admin_instance_v1.gapic.instance_admin_client import ( # noqa - InstanceAdminClient, +from google.cloud.spanner_admin_database_v1.services.database_admin.transports.grpc import ( + DatabaseAdminGrpcTransport, ) +from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient +from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient + # pylint: enable=line-too-long from google.cloud.client import ClientWithProject @@ -54,7 +51,9 @@ from google.cloud.spanner_v1._helpers import _merge_query_options, _metadata_with_prefix from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT from google.cloud.spanner_v1.instance import Instance -from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest +from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_admin_instance_v1 import ListInstanceConfigsRequest +from google.cloud.spanner_admin_instance_v1 import ListInstancesRequest _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) EMULATOR_ENV_VAR = "SPANNER_EMULATOR_HOST" @@ -65,10 +64,6 @@ ) % ((EMULATOR_ENV_VAR,) * 3) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" OPTIMIZER_VERSION_ENV_VAR = "SPANNER_OPTIMIZER_VERSION" -_USER_AGENT_DEPRECATED = ( - "The 'user_agent' argument to 'Client' is deprecated / unused. " - "Please pass an appropriate 'client_info' instead." -) def _get_spanner_emulator_host(): @@ -79,34 +74,6 @@ def _get_spanner_optimizer_version(): return os.getenv(OPTIMIZER_VERSION_ENV_VAR, "") -class InstanceConfig(object): - """Named configurations for Spanner instances. - - :type name: str - :param name: ID of the instance configuration - - :type display_name: str - :param display_name: Name of the instance configuration - """ - - def __init__(self, name, display_name): - self.name = name - self.display_name = display_name - - @classmethod - def from_pb(cls, config_pb): - """Construct an instance from the equvalent protobuf. - - :type config_pb: - :class:`~google.spanner.v1.spanner_instance_admin_pb2.InstanceConfig` - :param config_pb: the protobuf to parse - - :rtype: :class:`InstanceConfig` - :returns: an instance of this class - """ - return cls(config_pb.name, config_pb.display_name) - - class Client(ClientWithProject): """Client for interacting with Cloud Spanner API. @@ -135,23 +102,18 @@ class Client(ClientWithProject): you only need to set this if you're developing your own library or partner tool. 
- :type user_agent: str - :param user_agent: - (Deprecated) The user agent to be used with API request. - Not used. - :type client_options: :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` :param client_options: (Optional) Client options used to set user options on the client. API Endpoint should be set through client_options. :type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Query optimizer configuration to use for the given query. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` + message :class:`~google.cloud.spanner_v1.QueryOptions` :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` @@ -159,7 +121,6 @@ class Client(ClientWithProject): _instance_admin_api = None _database_admin_api = None - user_agent = None _SET_PROJECT = True # Used by from_service_account_json() SCOPE = (SPANNER_ADMIN_SCOPE,) @@ -170,7 +131,6 @@ def __init__( project=None, credentials=None, client_info=_CLIENT_INFO, - user_agent=None, client_options=None, query_options=None, ): @@ -206,10 +166,6 @@ def __init__( # Environment flag config has higher precedence than application config. self._query_options = _merge_query_options(query_options, env_query_options) - if user_agent is not None: - warnings.warn(_USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2) - self.user_agent = user_agent - if self._emulator_host is not None and ( "http://" in self._emulator_host or "https://" in self._emulator_host ): @@ -249,7 +205,7 @@ def instance_admin_api(self): """Helper for session-related API calls.""" if self._instance_admin_api is None: if self._emulator_host is not None: - transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport( + transport = InstanceAdminGrpcTransport( channel=grpc.insecure_channel(target=self._emulator_host) ) self._instance_admin_api = InstanceAdminClient( @@ -270,7 +226,7 @@ def database_admin_api(self): """Helper for session-related API calls.""" if self._database_admin_api is None: if self._emulator_host is not None: - transport = database_admin_grpc_transport.DatabaseAdminGrpcTransport( + transport = DatabaseAdminGrpcTransport( channel=grpc.insecure_channel(target=self._emulator_host) ) self._database_admin_api = DatabaseAdminClient( @@ -297,7 +253,7 @@ def copy(self): """ return self.__class__(project=self.project, credentials=self._credentials) - def list_instance_configs(self, page_size=None, page_token=None): + def list_instance_configs(self, page_size=None): """List available instance configurations for the client's project. .. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\ @@ -312,27 +268,19 @@ def list_instance_configs(self, page_size=None, page_token=None): from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. - :type page_token: str - :param page_token: - Optional. If present, return the next batch of configs, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. 
- :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of - :class:`~google.cloud.spanner_v1.instance.InstanceConfig` + :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` resources within the client's project. """ metadata = _metadata_with_prefix(self.project_name) - path = "projects/%s" % (self.project,) + request = ListInstanceConfigsRequest( + parent=self.project_name, page_size=page_size + ) page_iter = self.instance_admin_api.list_instance_configs( - path, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.next_page_token = page_token - page_iter.item_to_value = _item_to_instance_config return page_iter def instance( @@ -377,7 +325,7 @@ def instance( self._emulator_host, ) - def list_instances(self, filter_="", page_size=None, page_token=None): + def list_instances(self, filter_="", page_size=None): """List instances for the client's project. See @@ -393,54 +341,16 @@ def list_instances(self, filter_="", page_size=None, page_token=None): from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. - :type page_token: str - :param page_token: - Optional. If present, return the next batch of instances, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.spanner_v1.instance.Instance` resources within the client's project. """ metadata = _metadata_with_prefix(self.project_name) - path = "projects/%s" % (self.project,) + request = ListInstancesRequest( + parent=self.project_name, filter=filter_, page_size=page_size + ) page_iter = self.instance_admin_api.list_instances( - path, filter_=filter_, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.item_to_value = self._item_to_instance - page_iter.next_page_token = page_token return page_iter - - def _item_to_instance(self, iterator, instance_pb): - """Convert an instance protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type instance_pb: :class:`~google.spanner.admin.instance.v1.Instance` - :param instance_pb: An instance returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` - :returns: The next instance in the page. - """ - return Instance.from_pb(instance_pb, self) - - -def _item_to_instance_config(iterator, config_pb): # pylint: disable=unused-argument - """Convert an instance config protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type config_pb: - :class:`~google.spanner.admin.instance.v1.InstanceConfig` - :param config_pb: An instance config returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.instance.InstanceConfig` - :returns: The next instance config in the page. 
- """ - return InstanceConfig.from_pb(config_pb) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 8ece803847..c1c7953648 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -21,23 +21,19 @@ import threading import google.auth.credentials +from google.api_core.retry import Retry from google.api_core.retry import if_exception_type -from google.protobuf.struct_pb2 import Struct from google.cloud.exceptions import NotFound from google.api_core.exceptions import Aborted import six # pylint: disable=ungrouped-imports -from google.cloud.spanner_admin_database_v1.gapic import enums +from google.cloud.spanner_admin_database_v1 import Database as DatabasePB from google.cloud.spanner_v1._helpers import ( - _make_value_pb, _merge_query_options, _metadata_with_prefix, ) -from google.cloud.spanner_v1.backup import BackupInfo from google.cloud.spanner_v1.batch import Batch -from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient -from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1.pool import BurstyPool from google.cloud.spanner_v1.pool import SessionCheckout @@ -45,11 +41,17 @@ from google.cloud.spanner_v1.snapshot import _restart_on_unavailable from google.cloud.spanner_v1.snapshot import Snapshot from google.cloud.spanner_v1.streamed import StreamedResultSet -from google.cloud.spanner_v1.proto.transaction_pb2 import ( +from google.cloud.spanner_v1 import SpannerClient +from google.cloud.spanner_v1.services.spanner.transports.grpc import ( + SpannerGrpcTransport, +) +from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_v1 import ( TransactionSelector, TransactionOptions, ) -from google.cloud._helpers import _pb_timestamp_to_datetime # pylint: enable=ungrouped-imports @@ -65,6 +67,8 @@ _DATABASE_METADATA_FILTER = "name:{0}/operations/" +DEFAULT_RETRY_BACKOFF = Retry(initial=0.02, maximum=32, multiplier=1.3) + class Database(object): """Representation of a Cloud Spanner Database. @@ -115,7 +119,7 @@ def from_pb(cls, database_pb, instance, pool=None): """Creates an instance of this class from a protobuf. :type database_pb: - :class:`~google.spanner.v2.spanner_instance_admin_pb2.Instance` + :class:`~google.cloud.spanner_admin_instance_v1.Instance` :param database_pb: A instance protobuf object. :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` @@ -176,7 +180,7 @@ def name(self): def state(self): """State of this database. 
- :rtype: :class:`~google.cloud.spanner_admin_database_v1.gapic.enums.Database.State` + :rtype: :class:`~google.cloud.spanner_admin_database_v1.Database.State` :returns: an enum describing the state of the database """ return self._state @@ -219,7 +223,7 @@ def spanner_api(self): client_info = self._instance._client._client_info client_options = self._instance._client._client_options if self._instance.emulator_host is not None: - transport = spanner_grpc_transport.SpannerGrpcTransport( + transport = SpannerGrpcTransport( channel=grpc.insecure_channel(self._instance.emulator_host) ) self._spanner_api = SpannerClient( @@ -265,12 +269,12 @@ def create(self): if "-" in db_name: db_name = "`%s`" % (db_name,) - future = api.create_database( + request = CreateDatabaseRequest( parent=self._instance.name, create_statement="CREATE DATABASE %s" % (db_name,), extra_statements=list(self._ddl_statements), - metadata=metadata, ) + future = api.create_database(request=request, metadata=metadata) return future def exists(self): @@ -286,7 +290,7 @@ def exists(self): metadata = _metadata_with_prefix(self.name) try: - api.get_database_ddl(self.name, metadata=metadata) + api.get_database_ddl(database=self.name, metadata=metadata) except NotFound: return False return True @@ -303,11 +307,11 @@ def reload(self): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - response = api.get_database_ddl(self.name, metadata=metadata) + response = api.get_database_ddl(database=self.name, metadata=metadata) self._ddl_statements = tuple(response.statements) - response = api.get_database(self.name, metadata=metadata) - self._state = enums.Database.State(response.state) - self._create_time = _pb_timestamp_to_datetime(response.create_time) + response = api.get_database(name=self.name, metadata=metadata) + self._state = DatabasePB.State(response.state) + self._create_time = response.create_time self._restore_info = response.restore_info def update_ddl(self, ddl_statements, operation_id=""): @@ -331,9 +335,11 @@ def update_ddl(self, ddl_statements, operation_id=""): api = client.database_admin_api metadata = _metadata_with_prefix(self.name) - future = api.update_database_ddl( - self.name, ddl_statements, operation_id=operation_id, metadata=metadata + request = UpdateDatabaseDdlRequest( + database=self.name, statements=ddl_statements, operation_id=operation_id, ) + + future = api.update_database_ddl(request=request, metadata=metadata) return future def drop(self): @@ -344,7 +350,7 @@ def drop(self): """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) - api.drop_database(self.name, metadata=metadata) + api.drop_database(database=self.name, metadata=metadata) def execute_partitioned_dml( self, dml, params=None, param_types=None, query_options=None @@ -364,12 +370,12 @@ def execute_partitioned_dml( required if parameters are passed. :type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Query optimizer configuration to use for the given query. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` + message :class:`~google.cloud.spanner_v1.QueryOptions` :rtype: int :returns: Count of rows affected by the DML statement. 
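Before the implementation hunks that follow, it may help to see the two conventions these database.py changes adopt end to end: positional GAPIC arguments become a single proto-plus request object passed as `request=`, and enums hang off the message classes instead of a `gapic.enums` module. A sketch under assumed ids (`my-project`, `my-instance`, `my-database`, and the DDL statement are illustrative); it mirrors the internal `database_admin_api` handle the diff itself uses:

    from google.cloud import spanner_v1
    from google.cloud.spanner_admin_database_v1 import (
        Database as DatabasePB,
        UpdateDatabaseDdlRequest,
    )

    client = spanner_v1.Client(project="my-project")  # illustrative ids
    db = client.instance("my-instance").database("my-database")

    # One proto-plus request object replaces the old positional arguments;
    # db.update_ddl() builds exactly this request internally.
    request = UpdateDatabaseDdlRequest(
        database=db.name,
        statements=["CREATE INDEX SingersByName ON Singers(Name)"],
    )
    operation = db._instance._client.database_admin_api.update_database_ddl(
        request=request
    )
    operation.result()  # block until the schema change completes

    # Enum access after the migration (was enums.Database.State.READY):
    db.reload()
    if db.state == DatabasePB.State.READY:
        print("database ready")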
@@ -378,13 +384,13 @@ def execute_partitioned_dml( self._instance._client._query_options, query_options ) if params is not None: + from google.cloud.spanner_v1.transaction import Transaction + if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") - params_pb = Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} - ) + params_pb = Transaction._make_params_pb(params, param_types) else: - params_pb = None + params_pb = {} api = self.spanner_api @@ -398,20 +404,21 @@ def execute_pdml(): with SessionCheckout(self._pool) as session: txn = api.begin_transaction( - session.name, txn_options, metadata=metadata + session=session.name, options=txn_options, metadata=metadata ) txn_selector = TransactionSelector(id=txn.id) - restart = functools.partial( - api.execute_streaming_sql, - session.name, - dml, + request = ExecuteSqlRequest( + session=session.name, + sql=dml, transaction=txn_selector, params=params_pb, param_types=param_types, query_options=query_options, - metadata=metadata, + ) + restart = functools.partial( + api.execute_streaming_sql, request=request, metadata=metadata, ) iterator = _restart_on_unavailable(restart) @@ -421,9 +428,7 @@ def execute_pdml(): return result_set.stats.row_count_lower_bound - retry_config = api._method_configs["ExecuteStreamingSql"].retry - - return _retry_on_aborted(execute_pdml, retry_config)() + return _retry_on_aborted(execute_pdml, DEFAULT_RETRY_BACKOFF)() def session(self, labels=None): """Factory to create a session for this database. @@ -540,7 +545,10 @@ def restore(self, source): api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) future = api.restore_database( - self._instance.name, self.database_id, backup=source.name, metadata=metadata + parent=self._instance.name, + database_id=self.database_id, + backup=source.name, + metadata=metadata, ) return future @@ -551,8 +559,8 @@ def is_ready(self): :returns: True if the database state is READY_OPTIMIZING or READY, else False. """ return ( - self.state == enums.Database.State.READY_OPTIMIZING - or self.state == enums.Database.State.READY + self.state == DatabasePB.State.READY_OPTIMIZING + or self.state == DatabasePB.State.READY ) def is_optimized(self): @@ -561,7 +569,7 @@ def is_optimized(self): :rtype: bool :returns: True if the database state is READY, else False. """ - return self.state == enums.Database.State.READY + return self.state == DatabasePB.State.READY def list_database_operations(self, filter_="", page_size=None): """List database operations for the database. @@ -598,7 +606,7 @@ class BatchCheckout(object): Caller must *not* use the batch to perform API requests outside the scope of the context manager. - :type database: :class:`~google.cloud.spanner.database.Database` + :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database to use """ @@ -630,7 +638,7 @@ class SnapshotCheckout(object): Caller must *not* use the snapshot to perform API requests outside the scope of the context manager. - :type database: :class:`~google.cloud.spanner.database.Database` + :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database to use :type kw: dict @@ -657,7 +665,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class BatchSnapshot(object): """Wrapper for generating and processing read / query batches. 
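The partitioned-DML hunk above pins `DEFAULT_RETRY_BACKOFF` because the generated `ExecuteStreamingSql` config no longer defines retry settings, then re-runs the whole attempt on `Aborted` through `_retry_on_aborted`. A self-contained sketch of that idiom, with `do_pdml` standing in for the nested `execute_pdml` closure; the predicate wiring is an assumption matching the `Retry`, `if_exception_type`, and `Aborted` imports in this diff, not a copy of the helper's body:

    from google.api_core.exceptions import Aborted
    from google.api_core.retry import Retry, if_exception_type

    DEFAULT_RETRY_BACKOFF = Retry(initial=0.02, maximum=32, multiplier=1.3)

    def retry_on_aborted(func, retry_config):
        # Re-run func whenever Spanner aborts the transaction, using the
        # exponential backoff configured above.
        return retry_config.with_predicate(if_exception_type(Aborted))(func)

    def do_pdml():
        # Begin a partitioned-DML transaction, stream the ExecuteSqlRequest,
        # and consume the result set (elided in this sketch).
        return 0

    row_count = retry_on_aborted(do_pdml, DEFAULT_RETRY_BACKOFF)()
    print(row_count)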
- :type database: :class:`~google.cloud.spanner.database.Database` + :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database to use :type read_timestamp: :class:`datetime.datetime` @@ -679,7 +687,7 @@ def __init__(self, database, read_timestamp=None, exact_staleness=None): def from_dict(cls, database, mapping): """Reconstruct an instance from a mapping. - :type database: :class:`~google.cloud.spanner.database.Database` + :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database to use :type mapping: mapping @@ -869,12 +877,12 @@ def generate_query_batches( differ. :type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Query optimizer configuration to use for the given query. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` + message :class:`~google.cloud.spanner_v1.QueryOptions` :rtype: iterable of dict :returns: @@ -975,16 +983,6 @@ def _check_ddl_statements(value): return tuple(value) -class RestoreInfo(object): - def __init__(self, source_type, backup_info): - self.source_type = enums.RestoreSourceType(source_type) - self.backup_info = BackupInfo.from_pb(backup_info) - - @classmethod - def from_pb(cls, pb): - return cls(pb.source_type, pb.backup_info) - - def _retry_on_aborted(func, retry_config): """Helper for :meth:`Database.execute_partitioned_dml`. diff --git a/google/cloud/spanner_v1/gapic/__init__.py b/google/cloud/spanner_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/google/cloud/spanner_v1/gapic/enums.py b/google/cloud/spanner_v1/gapic/enums.py deleted file mode 100644 index 3d4a941849..0000000000 --- a/google/cloud/spanner_v1/gapic/enums.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value - for the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class TypeCode(enum.IntEnum): - """ - ``TypeCode`` is used as part of ``Type`` to indicate the type of a - Cloud Spanner value. - - Each legal value of a type can be encoded to or decoded from a JSON - value, using the encodings described below. All Cloud Spanner values can - be ``null``, regardless of type; ``null``\ s are always encoded as a - JSON ``null``. - - Attributes: - TYPE_CODE_UNSPECIFIED (int): Not specified. - BOOL (int): Encoded as JSON ``true`` or ``false``. - INT64 (int): Encoded as ``string``, in decimal format. 
- FLOAT64 (int): Encoded as ``number``, or the strings ``"NaN"``, ``"Infinity"``, or - ``"-Infinity"``. - TIMESTAMP (int): Encoded as ``string`` in RFC 3339 timestamp format. The time zone - must be present, and must be ``"Z"``. - - If the schema has the column option ``allow_commit_timestamp=true``, the - placeholder string ``"spanner.commit_timestamp()"`` can be used to - instruct the system to insert the commit timestamp associated with the - transaction commit. - DATE (int): Encoded as ``string`` in RFC 3339 date format. - STRING (int): Encoded as ``string``. - BYTES (int): Encoded as a base64-encoded ``string``, as described in RFC 4648, - section 4. - ARRAY (int): Encoded as ``list``, where the list elements are represented - according to ``array_element_type``. - STRUCT (int): Encoded as ``list``, where list element ``i`` is represented - according to - [struct_type.fields[i]][google.spanner.v1.StructType.fields]. - NUMERIC (int): Encoded as ``string``, in decimal format or scientific notation - format. Decimal format: \ ``[+-]Digits[.[Digits]]`` or - \``+-\ ``.Digits`` - - Scientific notation: - \ ``[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]`` or - \``+-\ ``.Digits[ExponentIndicator[+-]Digits]`` (ExponentIndicator is - \`"e"\` or \`"E"`) - """ - - TYPE_CODE_UNSPECIFIED = 0 - BOOL = 1 - INT64 = 2 - FLOAT64 = 3 - TIMESTAMP = 4 - DATE = 5 - STRING = 6 - BYTES = 7 - ARRAY = 8 - STRUCT = 9 - NUMERIC = 10 - - -class ExecuteSqlRequest(object): - class QueryMode(enum.IntEnum): - """ - Mode in which the statement must be processed. - - Attributes: - NORMAL (int): The default mode. Only the statement results are returned. - PLAN (int): This mode returns only the query plan, without any results or - execution statistics information. - PROFILE (int): This mode returns both the query plan and the execution statistics along - with the results. - """ - - NORMAL = 0 - PLAN = 1 - PROFILE = 2 - - -class PlanNode(object): - class Kind(enum.IntEnum): - """ - The kind of ``PlanNode``. Distinguishes between the two different - kinds of nodes that can appear in a query plan. - - Attributes: - KIND_UNSPECIFIED (int): Not specified. - RELATIONAL (int): Denotes a Relational operator node in the expression tree. - Relational operators represent iterative processing of rows during query - execution. For example, a ``TableScan`` operation that reads rows from a - table. - SCALAR (int): Denotes a Scalar node in the expression tree. Scalar nodes represent - non-iterable entities in the query plan. For example, constants or - arithmetic operators appearing inside predicate expressions or references - to column names. - """ - - KIND_UNSPECIFIED = 0 - RELATIONAL = 1 - SCALAR = 2 diff --git a/google/cloud/spanner_v1/gapic/spanner_client.py b/google/cloud/spanner_v1/gapic/spanner_client.py deleted file mode 100644 index c0454761a0..0000000000 --- a/google/cloud/spanner_v1/gapic/spanner_client.py +++ /dev/null @@ -1,1913 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
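With the `TypeCode` wrappers above deleted, SQL type codes in v2 come from the proto-plus enums exposed by `google.cloud.spanner_v1`, and the library's `param_types` helpers remain the supported way to spell them when binding query parameters. A small query sketch, assuming illustrative project/instance/database ids and a `Singers` table:

    from google.cloud import spanner_v1
    from google.cloud.spanner_v1 import param_types

    client = spanner_v1.Client(project="my-project")  # illustrative ids
    database = client.instance("my-instance").database("my-database")

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT SingerId FROM Singers WHERE FirstName = @name",
            params={"name": "Alice"},
            # param_types.STRING wraps Type(code=TypeCode.STRING) so callers
            # never touch the raw enum directly.
            param_types={"name": param_types.STRING},
        )
        for row in rows:
            print(row)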
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.spanner.v1 Spanner API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.spanner_v1.gapic import enums -from google.cloud.spanner_v1.gapic import spanner_client_config -from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import mutation_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2_grpc -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version - - -class SpannerClient(object): - """ - Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - SERVICE_ADDRESS = "spanner.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.spanner.v1.Spanner" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpannerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def database_path(cls, project, instance, database): - """Return a fully-qualified database string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}", - project=project, - instance=instance, - database=database, - ) - - @classmethod - def session_path(cls, project, instance, database, session): - """Return a fully-qualified session string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}", - project=project, - instance=instance, - database=database, - session=session, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.SpannerGrpcTransport, - Callable[[~.Credentials, type], ~.SpannerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. 
- This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = spanner_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=spanner_grpc_transport.SpannerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = spanner_grpc_transport.SpannerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. 
- self._inner_api_calls = {} - - # Service calls - def create_session( - self, - database, - session=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new session. A session can be used to perform transactions - that read and/or modify data in a Cloud Spanner database. Sessions are - meant to be reused for many consecutive transactions. - - Sessions can only execute one transaction at a time. To execute multiple - concurrent read-write/write-only transactions, create multiple sessions. - Note that standalone reads and queries use a transaction internally, and - count toward the one transaction limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner may delete sessions for which no operations are sent for more - than an hour. If a session is deleted, requests to it return - ``NOT_FOUND``. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``"SELECT 1"``. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.create_session(database) - - Args: - database (str): Required. The database in which the new session is created. - session (Union[dict, ~google.cloud.spanner_v1.types.Session]): The session to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Session` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Session` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_session" not in self._inner_api_calls: - self._inner_api_calls[ - "create_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_session, - default_retry=self._method_configs["CreateSession"].retry, - default_timeout=self._method_configs["CreateSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.CreateSessionRequest(database=database, session=session) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_create_sessions( - self, - database, - session_count, - session_template=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # TODO: Initialize `session_count`: - >>> session_count = 0 - >>> - >>> response = client.batch_create_sessions(database, session_count) - - Args: - database (str): Required. The database in which the new sessions are created. - session_count (int): Required. The number of sessions to be created in this batch call. - The API may return fewer than the requested number of sessions. If a - specific number of sessions are desired, the client can make additional - calls to BatchCreateSessions (adjusting ``session_count`` as necessary). - session_template (Union[dict, ~google.cloud.spanner_v1.types.Session]): Parameters to be applied to each created session. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Session` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.BatchCreateSessionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "batch_create_sessions" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_create_sessions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_create_sessions, - default_retry=self._method_configs["BatchCreateSessions"].retry, - default_timeout=self._method_configs["BatchCreateSessions"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.BatchCreateSessionsRequest( - database=database, - session_count=session_count, - session_template=session_template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["batch_create_sessions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_session( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still alive. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> name = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> response = client.get_session(name) - - Args: - name (str): Required. The name of the session to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Session` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_session" not in self._inner_api_calls: - self._inner_api_calls[ - "get_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_session, - default_retry=self._method_configs["GetSession"].retry, - default_timeout=self._method_configs["GetSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.GetSessionRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_sessions( - self, - database, - page_size=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all sessions in a given database. 
- - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_sessions(database): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_sessions(database).pages: - ... for element in page: - ... # process element - ... pass - - Args: - database (str): Required. The database in which to list sessions. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - filter_ (str): An expression for filtering the results of the request. Filter rules - are case insensitive. The fields eligible for filtering are: - - - ``labels.key`` where key is the name of a label - - Some examples of using filters are: - - - ``labels.env:*`` --> The session has the label "env". - - ``labels.env:dev`` --> The session has the label "env" and the value - of the label contains the string "dev". - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_v1.types.Session` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_sessions" not in self._inner_api_calls: - self._inner_api_calls[ - "list_sessions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_sessions, - default_retry=self._method_configs["ListSessions"].retry, - default_timeout=self._method_configs["ListSessions"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ListSessionsRequest( - database=database, page_size=page_size, filter=filter_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_sessions"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="sessions", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_session( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> name = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> client.delete_session(name) - - Args: - name (str): Required. The name of the session to delete. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_session" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_session, - default_retry=self._method_configs["DeleteSession"].retry, - default_timeout=self._method_configs["DeleteSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.DeleteSessionRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_sql( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - resume_token=None, - query_mode=None, - partition_token=None, - seqno=None, - query_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Executes an SQL statement, returning all results in a single reply. - This method cannot be used to return a result set larger than 10 MiB; if - the query yields more data than that, the query fails with a - ``FAILED_PRECONDITION`` error. - - Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be fetched in streaming fashion by calling - ``ExecuteStreamingSql`` instead. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> response = client.execute_sql(session, sql) - - Args: - session (str): Required. The session in which the SQL query should be performed. - sql (str): Required. The SQL string. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. - - For queries, if none is provided, the default is a temporary read-only - transaction with strong concurrency. - - Standard DML statements require a read-write transaction. To protect - against replays, single-use transactions are not supported. The caller - must either supply an existing transaction ID or begin a new transaction. - - Partitioned DML requires an existing Partitioned DML transaction ID. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL - string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL - type from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL statement parameters. See the definition - of ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - resume_token (bytes): If this request is resuming a previously interrupted SQL statement - execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this enables - the new SQL statement execution to resume where the last one left off. - The rest of the request parameters must exactly match the request that - yielded this token. - query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can - only be set to ``QueryMode.NORMAL``. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact match - for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition_token. - seqno (long): A per-transaction sequence number used to identify this request. This field - makes each request idempotent such that if the request is received multiple - times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - - Required for DML statements. Ignored for queries. - query_options (Union[dict, ~google.cloud.spanner_v1.types.QueryOptions]): Query optimizer configuration to use for the given query. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ResultSet` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "execute_sql" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_sql" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_sql, - default_retry=self._method_configs["ExecuteSql"].retry, - default_timeout=self._method_configs["ExecuteSql"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteSqlRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - resume_token=resume_token, - query_mode=query_mode, - partition_token=partition_token, - seqno=seqno, - query_options=query_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_sql"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_streaming_sql( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - resume_token=None, - query_mode=None, - partition_token=None, - seqno=None, - query_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Like ``ExecuteSql``, except returns the result set as a stream. - Unlike ``ExecuteSql``, there is no limit on the size of the returned - result set. However, no individual row in the result set can exceed 100 - MiB, and no column value can exceed 10 MiB. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> for element in client.execute_streaming_sql(session, sql): - ... # process element - ... pass - - Args: - session (str): Required. The session in which the SQL query should be performed. - sql (str): Required. The SQL string. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. - - For queries, if none is provided, the default is a temporary read-only - transaction with strong concurrency. - - Standard DML statements require a read-write transaction. To protect - against replays, single-use transactions are not supported. The caller - must either supply an existing transaction ID or begin a new transaction. - - Partitioned DML requires an existing Partitioned DML transaction ID. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL - string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL - type from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL statement parameters. See the definition - of ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - resume_token (bytes): If this request is resuming a previously interrupted SQL statement - execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this enables - the new SQL statement execution to resume where the last one left off. - The rest of the request parameters must exactly match the request that - yielded this token. - query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can - only be set to ``QueryMode.NORMAL``. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact match - for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition_token. - seqno (long): A per-transaction sequence number used to identify this request. This field - makes each request idempotent such that if the request is received multiple - times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - - Required for DML statements. Ignored for queries. - query_options (Union[dict, ~google.cloud.spanner_v1.types.QueryOptions]): Query optimizer configuration to use for the given query. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.spanner_v1.types.PartialResultSet]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "execute_streaming_sql" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_streaming_sql" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_streaming_sql, - default_retry=self._method_configs["ExecuteStreamingSql"].retry, - default_timeout=self._method_configs["ExecuteStreamingSql"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteSqlRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - resume_token=resume_token, - query_mode=query_mode, - partition_token=partition_token, - seqno=seqno, - query_options=query_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_streaming_sql"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_batch_dml( - self, - session, - transaction, - statements, - seqno, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Executes a batch of SQL DML statements. This method allows many - statements to be run with lower latency than submitting them - sequentially with ``ExecuteSql``. - - Statements are executed in sequential order. A request can succeed even - if a statement fails. The ``ExecuteBatchDmlResponse.status`` field in - the response provides information about the statement that failed. - Clients must inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining - statements are not executed. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `transaction`: - >>> transaction = {} - >>> - >>> # TODO: Initialize `statements`: - >>> statements = [] - >>> - >>> # TODO: Initialize `seqno`: - >>> seqno = 0 - >>> - >>> response = client.execute_batch_dml(session, transaction, statements, seqno) - - Args: - session (str): Required. The session in which the DML statements should be performed. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Required. The transaction to use. Must be a read-write transaction. - - To protect against replays, single-use transactions are not supported. The - caller must either supply an existing transaction ID or begin a new - transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - statements (list[Union[dict, ~google.cloud.spanner_v1.types.Statement]]): Required. The list of statements to execute in this batch. - Statements are executed serially, such that the effects of statement - ``i`` are visible to statement ``i+1``. Each statement must be a DML - statement. Execution stops at the first failed statement; the remaining - statements are not executed. - - Callers must provide at least one statement. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Statement` - seqno (long): Required. A per-transaction sequence number used to identify this request. 
This field - makes each request idempotent such that if the request is received multiple - times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ExecuteBatchDmlResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "execute_batch_dml" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_batch_dml" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_batch_dml, - default_retry=self._method_configs["ExecuteBatchDml"].retry, - default_timeout=self._method_configs["ExecuteBatchDml"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteBatchDmlRequest( - session=session, transaction=transaction, statements=statements, seqno=seqno - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_batch_dml"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read( - self, - session, - table, - columns, - key_set, - transaction=None, - index=None, - limit=None, - resume_token=None, - partition_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to ``ExecuteSql``. This method cannot - be used to return a result set larger than 10 MiB; if the read matches - more data than that, the read fails with a ``FAILED_PRECONDITION`` - error. - - Reads inside read-write transactions might return ``ABORTED``. If this - occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be yielded in streaming fashion by calling - ``StreamingRead`` instead. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `columns`: - >>> columns = [] - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> response = client.read(session, table, columns, key_set) - - Args: - session (str): Required. 
The session in which the read should be performed. - table (str): Required. The name of the table in the database to be read. - columns (list[str]): Required. The columns of ``table`` to be returned for each row - matching this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded in table - primary key order (if ``index`` is empty) or index key order (if - ``index`` is non-empty). If the ``partition_token`` field is not empty, - rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If - ``limit`` is zero, the default is no limit. A limit cannot be specified - if ``partition_token`` is set. - resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last ``PartialResultSet`` - yielded before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request parameters - must exactly match the request that yielded this token. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact match - for the values of fields common to this message and the - PartitionReadRequest message used to create this partition_token. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ResultSet` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "read" not in self._inner_api_calls: - self._inner_api_calls["read"] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read, - default_retry=self._method_configs["Read"].retry, - default_timeout=self._method_configs["Read"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ReadRequest( - session=session, - table=table, - columns=columns, - key_set=key_set, - transaction=transaction, - index=index, - limit=limit, - resume_token=resume_token, - partition_token=partition_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def streaming_read( - self, - session, - table, - columns, - key_set, - transaction=None, - index=None, - limit=None, - resume_token=None, - partition_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Like ``Read``, except returns the result set as a stream. Unlike - ``Read``, there is no limit on the size of the returned result set. - However, no individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `columns`: - >>> columns = [] - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> for element in client.streaming_read(session, table, columns, key_set): - ... # process element - ... pass - - Args: - session (str): Required. The session in which the read should be performed. - table (str): Required. The name of the table in the database to be read. - columns (list[str]): Required. The columns of ``table`` to be returned for each row - matching this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded in table - primary key order (if ``index`` is empty) or index key order (if - ``index`` is non-empty). If the ``partition_token`` field is not empty, - rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. 
This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If - ``limit`` is zero, the default is no limit. A limit cannot be specified - if ``partition_token`` is set. - resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last ``PartialResultSet`` - yielded before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request parameters - must exactly match the request that yielded this token. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact match - for the values of fields common to this message and the - PartitionReadRequest message used to create this partition_token. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.spanner_v1.types.PartialResultSet]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "streaming_read" not in self._inner_api_calls: - self._inner_api_calls[ - "streaming_read" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.streaming_read, - default_retry=self._method_configs["StreamingRead"].retry, - default_timeout=self._method_configs["StreamingRead"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ReadRequest( - session=session, - table=table, - columns=columns, - key_set=key_set, - transaction=transaction, - index=index, - limit=limit, - resume_token=resume_token, - partition_token=partition_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["streaming_read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def begin_transaction( - self, - session, - options_, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Begins a new transaction. This step can often be skipped: ``Read``, - ``ExecuteSql`` and ``Commit`` can begin a new transaction as a - side-effect. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `options_`: - >>> options_ = {} - >>> - >>> response = client.begin_transaction(session, options_) - - Args: - session (str): Required. 
The session in which the transaction runs. - options_ (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Required. Options for the new transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Transaction` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "begin_transaction" not in self._inner_api_calls: - self._inner_api_calls[ - "begin_transaction" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.begin_transaction, - default_retry=self._method_configs["BeginTransaction"].retry, - default_timeout=self._method_configs["BeginTransaction"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.BeginTransactionRequest(session=session, options=options_) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["begin_transaction"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def commit( - self, - session, - transaction_id=None, - single_use_transaction=None, - mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Commits a transaction. The request includes the mutations to be - applied to rows in the database. - - ``Commit`` might return an ``ABORTED`` error. This can occur at any - time; commonly, the cause is conflicts with concurrent transactions. - However, it can also happen for a variety of other reasons. If - ``Commit`` returns ``ABORTED``, the caller should re-attempt the - transaction from the beginning, re-using the same session. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> response = client.commit(session) - - Args: - session (str): Required. The session in which the transaction to be committed is running. - transaction_id (bytes): Commit a previously-started transaction. - single_use_transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Execute mutations in a temporary transaction. Note that unlike - commit of a previously-started transaction, commit with a temporary - transaction is non-idempotent. 
That is, if the ``CommitRequest`` is sent - to Cloud Spanner more than once (for instance, due to retries in the - application, or in the transport library), it is possible that the - mutations are executed more than once. If this is undesirable, use - ``BeginTransaction`` and ``Commit`` instead. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionOptions` - mutations (list[Union[dict, ~google.cloud.spanner_v1.types.Mutation]]): The mutations to be executed when this transaction commits. All - mutations are applied atomically, in the order they appear in - this list. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.CommitResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "commit" not in self._inner_api_calls: - self._inner_api_calls[ - "commit" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.commit, - default_retry=self._method_configs["Commit"].retry, - default_timeout=self._method_configs["Commit"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction_id=transaction_id, single_use_transaction=single_use_transaction - ) - - request = spanner_pb2.CommitRequest( - session=session, - transaction_id=transaction_id, - single_use_transaction=single_use_transaction, - mutations=mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["commit"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback( - self, - session, - transaction_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more ``Read`` - or ``ExecuteSql`` requests and ultimately decides not to commit. - - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, - the transaction was already aborted, or the transaction is not found. - ``Rollback`` never returns ``ABORTED``. 
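Editorial note on the removed ``commit`` wrapper above: it enforced the mutual exclusion of ``transaction_id`` and ``single_use_transaction`` via ``google.api_core.protobuf_helpers.check_oneof``, which raises ``ValueError`` when more than one keyword argument is set. A minimal sketch of that guard, with illustrative values:

from google.api_core import protobuf_helpers

# Exactly one commit mode may be supplied; check_oneof ignores None values
# and raises ValueError if more than one argument is non-None.
protobuf_helpers.check_oneof(
    transaction_id=b"existing-txn-id", single_use_transaction=None
)  # passes silently

try:
    protobuf_helpers.check_oneof(
        transaction_id=b"existing-txn-id",
        single_use_transaction={"read_write": {}},  # both set: invalid
    )
except ValueError as exc:
    print("rejected:", exc)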
- - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `transaction_id`: - >>> transaction_id = b'' - >>> - >>> client.rollback(session, transaction_id) - - Args: - session (str): Required. The session in which the transaction to roll back is running. - transaction_id (bytes): Required. The transaction to roll back. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "rollback" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback, - default_retry=self._method_configs["Rollback"].retry, - default_timeout=self._method_configs["Rollback"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.RollbackRequest( - session=session, transaction_id=transaction_id - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["rollback"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partition_query( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - partition_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a set of partition tokens that can be used to execute a - query operation in parallel. Each of the returned partition tokens can - be used by ``ExecuteStreamingSql`` to specify a subset of the query - result to read. The same session and read-only transaction must be used - by the PartitionQueryRequest used to create the partition tokens and the - ExecuteSqlRequests that use the partition tokens. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, - and the whole operation must be restarted from the beginning. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> response = client.partition_query(session, sql) - - Args: - session (str): Required. The session used to create the partitions. - sql (str): Required. The query request to generate partitions for. 
The request - will fail if the query is not root partitionable. The query plan of a - root partitionable query has a single distributed union operator. A - distributed union operator conceptually divides one or more tables into - multiple splits, remotely evaluates a subquery independently on each - split, and then unions all results. - - This must not contain DML commands, such as INSERT, UPDATE, or DELETE. - Use ``ExecuteStreamingSql`` with a PartitionedDml transaction for large, - partition-friendly DML operations. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use - transactions are not. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL - string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL - type from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL query parameters. See the definition of - ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.PartitionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "partition_query" not in self._inner_api_calls: - self._inner_api_calls[ - "partition_query" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partition_query, - default_retry=self._method_configs["PartitionQuery"].retry, - default_timeout=self._method_configs["PartitionQuery"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.PartitionQueryRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - partition_options=partition_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["partition_query"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partition_read( - self, - session, - table, - key_set, - transaction=None, - index=None, - columns=None, - partition_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read result to read. The - same session and read-only transaction must be used by the - PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering - guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a partition_token. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, - and the whole operation must be restarted from the beginning. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> response = client.partition_read(session, table, key_set) - - Args: - session (str): Required. The session used to create the partitions. - table (str): Required. The name of the table in the database to be read. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use - transactions are not. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - columns (list[str]): The columns of ``table`` to be returned for each row matching this - request. - partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.PartitionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "partition_read" not in self._inner_api_calls: - self._inner_api_calls[ - "partition_read" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partition_read, - default_retry=self._method_configs["PartitionRead"].retry, - default_timeout=self._method_configs["PartitionRead"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.PartitionReadRequest( - session=session, - table=table, - key_set=key_set, - transaction=transaction, - index=index, - columns=columns, - partition_options=partition_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["partition_read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/spanner_v1/gapic/spanner_client_config.py b/google/cloud/spanner_v1/gapic/spanner_client_config.py deleted file mode 100644 index 458ea6d731..0000000000 --- a/google/cloud/spanner_v1/gapic/spanner_client_config.py +++ /dev/null @@ -1,137 +0,0 @@ -config = { - "interfaces": { - "google.spanner.v1.Spanner": { - "retry_codes": { - "retry_policy_1_codes": ["UNAVAILABLE"], - "no_retry_codes": [], - "retry_policy_3_codes": ["UNAVAILABLE"], - "retry_policy_2_codes": ["UNAVAILABLE"], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 
32000, - "initial_rpc_timeout_millis": 30000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 30000, - "total_timeout_millis": 30000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - }, - "methods": { - "CreateSession": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "BatchCreateSessions": { - "timeout_millis": 60000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "GetSession": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "ListSessions": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteSession": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "ExecuteSql": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "ExecuteStreamingSql": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "ExecuteBatchDml": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "Read": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "StreamingRead": { - "timeout_millis": 3600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "BeginTransaction": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "Commit": { - "timeout_millis": 3600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "Rollback": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "PartitionQuery": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "PartitionRead": { - "timeout_millis": 30000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - }, - } - } -} diff --git a/google/cloud/spanner_v1/gapic/transports/__init__.py b/google/cloud/spanner_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config b/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config deleted file mode 100755 index c34397a1c8..0000000000 --- 
a/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config +++ /dev/null @@ -1,88 +0,0 @@ -channel_pool: { - max_size: 10 - max_concurrent_streams_low_watermark: 100 -} -method: { - name: "/google.spanner.v1.Spanner/CreateSession" - affinity: { - command: BIND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/GetSession" - affinity: { - command: BOUND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/DeleteSession" - affinity: { - command: UNBIND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/ExecuteSql" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/ExecuteStreamingSql" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Read" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/StreamingRead" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/BeginTransaction" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Commit" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Rollback" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/PartitionQuery" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/PartitionRead" - affinity: { - command: BOUND - affinity_key: "session" - } -} diff --git a/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py b/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py deleted file mode 100644 index 7cb2cb2ef2..0000000000 --- a/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py +++ /dev/null @@ -1,415 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pkg_resources -import grpc_gcp - -import google.api_core.grpc_helpers - -from google.cloud.spanner_v1.proto import spanner_pb2_grpc - - -_GRPC_KEEPALIVE_MS = 2 * 60 * 1000 -_SPANNER_GRPC_CONFIG = "spanner.grpc.config" - - -class SpannerGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.v1 Spanner API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.data", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. 
This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - "grpc.keepalive_time_ms": _GRPC_KEEPALIVE_MS, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = {"spanner_stub": spanner_pb2_grpc.SpannerStub(channel)} - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - grpc_gcp_config = grpc_gcp.api_config_from_text_pb( - pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG) - ) - options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)] - if "options" in kwargs: - options.extend(kwargs["options"]) - kwargs["options"] = options - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_session(self): - """Return the gRPC stub for :meth:`SpannerClient.create_session`. - - Creates a new session. A session can be used to perform transactions - that read and/or modify data in a Cloud Spanner database. Sessions are - meant to be reused for many consecutive transactions. - - Sessions can only execute one transaction at a time. To execute multiple - concurrent read-write/write-only transactions, create multiple sessions. - Note that standalone reads and queries use a transaction internally, and - count toward the one transaction limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner may delete sessions for which no operations are sent for more - than an hour. If a session is deleted, requests to it return - ``NOT_FOUND``. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``"SELECT 1"``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
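Editorial note on the session-lifecycle docs above: idle sessions can be kept alive with a trivial query. A hedged sketch of that pattern through the handwritten client surface (the instance and database IDs are placeholders):

from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

# A throwaway "SELECT 1" is enough to keep the underlying session from
# being reclaimed as idle by the backend (sessions unused for over an
# hour may be deleted).
with database.snapshot() as snapshot:
    list(snapshot.execute_sql("SELECT 1"))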
- """ - return self._stubs["spanner_stub"].CreateSession - - @property - def batch_create_sessions(self): - """Return the gRPC stub for :meth:`SpannerClient.batch_create_sessions`. - - Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].BatchCreateSessions - - @property - def get_session(self): - """Return the gRPC stub for :meth:`SpannerClient.get_session`. - - Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still alive. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].GetSession - - @property - def list_sessions(self): - """Return the gRPC stub for :meth:`SpannerClient.list_sessions`. - - Lists all sessions in a given database. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ListSessions - - @property - def delete_session(self): - """Return the gRPC stub for :meth:`SpannerClient.delete_session`. - - Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].DeleteSession - - @property - def execute_sql(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_sql`. - - Executes an SQL statement, returning all results in a single reply. - This method cannot be used to return a result set larger than 10 MiB; if - the query yields more data than that, the query fails with a - ``FAILED_PRECONDITION`` error. - - Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be fetched in streaming fashion by calling - ``ExecuteStreamingSql`` instead. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteSql - - @property - def execute_streaming_sql(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_streaming_sql`. - - Like ``ExecuteSql``, except returns the result set as a stream. - Unlike ``ExecuteSql``, there is no limit on the size of the returned - result set. However, no individual row in the result set can exceed 100 - MiB, and no column value can exceed 10 MiB. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteStreamingSql - - @property - def execute_batch_dml(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_batch_dml`. - - Executes a batch of SQL DML statements. 
This method allows many - statements to be run with lower latency than submitting them - sequentially with ``ExecuteSql``. - - Statements are executed in sequential order. A request can succeed even - if a statement fails. The ``ExecuteBatchDmlResponse.status`` field in - the response provides information about the statement that failed. - Clients must inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining - statements are not executed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteBatchDml - - @property - def read(self): - """Return the gRPC stub for :meth:`SpannerClient.read`. - - Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to ``ExecuteSql``. This method cannot - be used to return a result set larger than 10 MiB; if the read matches - more data than that, the read fails with a ``FAILED_PRECONDITION`` - error. - - Reads inside read-write transactions might return ``ABORTED``. If this - occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be yielded in streaming fashion by calling - ``StreamingRead`` instead. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Read - - @property - def streaming_read(self): - """Return the gRPC stub for :meth:`SpannerClient.streaming_read`. - - Like ``Read``, except returns the result set as a stream. Unlike - ``Read``, there is no limit on the size of the returned result set. - However, no individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].StreamingRead - - @property - def begin_transaction(self): - """Return the gRPC stub for :meth:`SpannerClient.begin_transaction`. - - Begins a new transaction. This step can often be skipped: ``Read``, - ``ExecuteSql`` and ``Commit`` can begin a new transaction as a - side-effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].BeginTransaction - - @property - def commit(self): - """Return the gRPC stub for :meth:`SpannerClient.commit`. - - Commits a transaction. The request includes the mutations to be - applied to rows in the database. - - ``Commit`` might return an ``ABORTED`` error. This can occur at any - time; commonly, the cause is conflicts with concurrent transactions. - However, it can also happen for a variety of other reasons. If - ``Commit`` returns ``ABORTED``, the caller should re-attempt the - transaction from the beginning, re-using the same session. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Commit - - @property - def rollback(self): - """Return the gRPC stub for :meth:`SpannerClient.rollback`. - - Rolls back a transaction, releasing any locks it holds. 
It is a good - idea to call this for any transaction that includes one or more ``Read`` - or ``ExecuteSql`` requests and ultimately decides not to commit. - - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, - the transaction was already aborted, or the transaction is not found. - ``Rollback`` never returns ``ABORTED``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Rollback - - @property - def partition_query(self): - """Return the gRPC stub for :meth:`SpannerClient.partition_query`. - - Creates a set of partition tokens that can be used to execute a - query operation in parallel. Each of the returned partition tokens can - be used by ``ExecuteStreamingSql`` to specify a subset of the query - result to read. The same session and read-only transaction must be used - by the PartitionQueryRequest used to create the partition tokens and the - ExecuteSqlRequests that use the partition tokens. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, - and the whole operation must be restarted from the beginning. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].PartitionQuery - - @property - def partition_read(self): - """Return the gRPC stub for :meth:`SpannerClient.partition_read`. - - Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read result to read. The - same session and read-only transaction must be used by the - PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering - guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a partition_token. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, - and the whole operation must be restarted from the beginning. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
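Editorial note on the ``PartitionQuery``/``PartitionRead`` docs above: partition tokens exist so that independent workers can each stream a disjoint slice of one read-only snapshot. A hedged sketch of that pattern using the handwritten ``BatchSnapshot`` helpers (table and column names are placeholders):

from google.cloud import spanner
from google.cloud.spanner_v1.keyset import KeySet

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

snapshot = database.batch_snapshot()
batches = snapshot.generate_read_batches(
    table="my_table",
    columns=("id", "value"),
    keyset=KeySet(all_=True),
)
for batch in batches:
    # Each batch wraps one partition token; batches may be handed to
    # separate workers, since partitioned reads have no ordering
    # guarantees across (or within) partitions.
    for row in snapshot.process_read_batch(batch):
        print(row)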
- """ - return self._stubs["spanner_stub"].PartitionRead diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index f0809e7d81..be49dd2d84 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -14,16 +14,15 @@ """User friendly container for Cloud Spanner Instance.""" -import google.api_core.operation import re -from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, -) -from google.cloud.spanner_admin_database_v1.proto import ( - backup_pb2, - spanner_database_admin_pb2, -) +from google.cloud.spanner_admin_instance_v1 import Instance as InstancePB +from google.cloud.spanner_admin_database_v1.types import backup +from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.cloud.spanner_admin_database_v1 import ListBackupsRequest +from google.cloud.spanner_admin_database_v1 import ListBackupOperationsRequest +from google.cloud.spanner_admin_database_v1 import ListDatabasesRequest +from google.cloud.spanner_admin_database_v1 import ListDatabaseOperationsRequest from google.protobuf.empty_pb2 import Empty from google.protobuf.field_mask_pb2 import FieldMask @@ -32,7 +31,6 @@ from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1.backup import Backup from google.cloud.spanner_v1.database import Database -from google.cloud.spanner_v1.pool import BurstyPool # pylint: enable=ungrouped-imports @@ -44,26 +42,26 @@ DEFAULT_NODE_COUNT = 1 _OPERATION_METADATA_MESSAGES = ( - backup_pb2.Backup, - backup_pb2.CreateBackupMetadata, - spanner_database_admin_pb2.CreateDatabaseMetadata, - spanner_database_admin_pb2.Database, - spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata, - spanner_database_admin_pb2.RestoreDatabaseMetadata, - spanner_database_admin_pb2.UpdateDatabaseDdlMetadata, + backup.Backup, + backup.CreateBackupMetadata, + spanner_database_admin.CreateDatabaseMetadata, + spanner_database_admin.Database, + spanner_database_admin.OptimizeRestoredDatabaseMetadata, + spanner_database_admin.RestoreDatabaseMetadata, + spanner_database_admin.UpdateDatabaseDdlMetadata, ) _OPERATION_METADATA_TYPES = { - "type.googleapis.com/{}".format(message.DESCRIPTOR.full_name): message + "type.googleapis.com/{}".format(message._meta.full_name): message for message in _OPERATION_METADATA_MESSAGES } _OPERATION_RESPONSE_TYPES = { - backup_pb2.CreateBackupMetadata: backup_pb2.Backup, - spanner_database_admin_pb2.CreateDatabaseMetadata: spanner_database_admin_pb2.Database, - spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata: spanner_database_admin_pb2.Database, - spanner_database_admin_pb2.RestoreDatabaseMetadata: spanner_database_admin_pb2.Database, - spanner_database_admin_pb2.UpdateDatabaseDdlMetadata: Empty, + backup.CreateBackupMetadata: backup.Backup, + spanner_database_admin.CreateDatabaseMetadata: spanner_database_admin.Database, + spanner_database_admin.OptimizeRestoredDatabaseMetadata: spanner_database_admin.Database, + spanner_database_admin.RestoreDatabaseMetadata: spanner_database_admin.Database, + spanner_database_admin.UpdateDatabaseDdlMetadata: Empty, } @@ -239,7 +237,7 @@ def create(self): :raises Conflict: if the instance already exists """ api = self._client.instance_admin_api - instance_pb = admin_v1_pb2.Instance( + instance_pb = InstancePB( name=self.name, config=self.configuration_name, display_name=self.display_name, @@ -269,7 +267,7 @@ def exists(self): metadata = 
_metadata_with_prefix(self.name) try: - api.get_instance(self.name, metadata=metadata) + api.get_instance(name=self.name, metadata=metadata) except NotFound: return False @@ -286,7 +284,7 @@ def reload(self): api = self._client.instance_admin_api metadata = _metadata_with_prefix(self.name) - instance_pb = api.get_instance(self.name, metadata=metadata) + instance_pb = api.get_instance(name=self.name, metadata=metadata) self._update_from_pb(instance_pb) @@ -313,7 +311,7 @@ def update(self): :raises NotFound: if the instance does not exist """ api = self._client.instance_admin_api - instance_pb = admin_v1_pb2.Instance( + instance_pb = InstancePB( name=self.name, config=self.configuration_name, display_name=self.display_name, @@ -346,7 +344,7 @@ def delete(self): api = self._client.instance_admin_api metadata = _metadata_with_prefix(self.name) - api.delete_instance(self.name, metadata=metadata) + api.delete_instance(name=self.name, metadata=metadata) def database(self, database_id, ddl_statements=(), pool=None): """Factory to create a database within this instance. @@ -367,7 +365,7 @@ def database(self, database_id, ddl_statements=(), pool=None): """ return Database(database_id, self, ddl_statements=ddl_statements, pool=pool) - def list_databases(self, page_size=None, page_token=None): + def list_databases(self, page_size=None): """List databases for the instance. See @@ -379,41 +377,18 @@ def list_databases(self, page_size=None): from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. - :type page_token: str - :param page_token: - Optional. If present, return the next batch of databases, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.spanner_v1.database.Database` resources within the current instance. """ metadata = _metadata_with_prefix(self.name) + request = ListDatabasesRequest(parent=self.name, page_size=page_size) page_iter = self._client.database_admin_api.list_databases( - self.name, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.next_page_token = page_token - page_iter.item_to_value = self._item_to_database return page_iter - def _item_to_database(self, iterator, database_pb): - """Convert a database protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type database_pb: :class:`~google.spanner.admin.database.v1.Database` - :param database_pb: A database returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.database.Database` - :returns: The next database in the page. - """ - return Database.from_pb(database_pb, self, pool=BurstyPool()) - def backup(self, backup_id, database="", expire_time=None): """Factory to create a backup within this instance. @@ -456,26 +431,14 @@ def list_backups(self, filter_="", page_size=None): resources within the current instance. 
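Editorial note on the ``list_databases`` hunk above: it captures the migration's central calling-convention change. Flattened positional arguments are replaced by a single request message, and the returned pager handles ``page_token`` bookkeeping itself, which is why the deprecated ``page_token`` argument could be dropped. A sketch of the new convention (the parent path is a placeholder):

from google.cloud.spanner_admin_database_v1 import (
    DatabaseAdminClient,
    ListDatabasesRequest,
)

api = DatabaseAdminClient()
request = ListDatabasesRequest(
    parent="projects/my-project/instances/my-instance",
    page_size=100,
)
# The pager transparently fetches successive pages and yields Database
# messages, so no manual token threading is needed.
for database_pb in api.list_databases(request=request):
    print(database_pb.name)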
""" metadata = _metadata_with_prefix(self.name) + request = ListBackupsRequest( + parent=self.name, filter=filter_, page_size=page_size, + ) page_iter = self._client.database_admin_api.list_backups( - self.name, filter_, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.item_to_value = self._item_to_backup return page_iter - def _item_to_backup(self, iterator, backup_pb): - """Convert a backup protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type backup_pb: :class:`~google.spanner.admin.database.v1.Backup` - :param backup_pb: A backup returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.backup.Backup` - :returns: The next backup in the page. - """ - return Backup.from_pb(backup_pb, self) - def list_backup_operations(self, filter_="", page_size=None): """List backup operations for the instance. @@ -496,10 +459,12 @@ def list_backup_operations(self, filter_="", page_size=None): resources within the current instance. """ metadata = _metadata_with_prefix(self.name) + request = ListBackupOperationsRequest( + parent=self.name, filter=filter_, page_size=page_size, + ) page_iter = self._client.database_admin_api.list_backup_operations( - self.name, filter_, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.item_to_value = self._item_to_operation return page_iter def list_database_operations(self, filter_="", page_size=None): @@ -522,27 +487,10 @@ def list_database_operations(self, filter_="", page_size=None): resources within the current instance. """ metadata = _metadata_with_prefix(self.name) + request = ListDatabaseOperationsRequest( + parent=self.name, filter=filter_, page_size=page_size, + ) page_iter = self._client.database_admin_api.list_database_operations( - self.name, filter_, page_size=page_size, metadata=metadata + request=request, metadata=metadata ) - page_iter.item_to_value = self._item_to_operation return page_iter - - def _item_to_operation(self, iterator, operation_pb): - """Convert an operation protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type operation_pb: :class:`~google.longrunning.operations.Operation` - :param operation_pb: An operation returned from the API. - - :rtype: :class:`~google.api_core.operation.Operation` - :returns: The next operation in the page. 
- """ - operations_client = self._client.database_admin_api.transport._operations_client - metadata_type = _type_string_to_type_pb(operation_pb.metadata.type_url) - response_type = _OPERATION_RESPONSE_TYPES[metadata_type] - return google.api_core.operation.from_gapic( - operation_pb, operations_client, response_type, metadata_type=metadata_type - ) diff --git a/google/cloud/spanner_v1/keyset.py b/google/cloud/spanner_v1/keyset.py index fb45882bec..269bb12f05 100644 --- a/google/cloud/spanner_v1/keyset.py +++ b/google/cloud/spanner_v1/keyset.py @@ -14,8 +14,8 @@ """Wrap representation of Spanner keys / ranges.""" -from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange as KeyRangePB -from google.cloud.spanner_v1.proto.keys_pb2 import KeySet as KeySetPB +from google.cloud.spanner_v1 import KeyRangePB +from google.cloud.spanner_v1 import KeySetPB from google.cloud.spanner_v1._helpers import _make_list_value_pb from google.cloud.spanner_v1._helpers import _make_list_value_pbs @@ -68,7 +68,7 @@ def __init__( def _to_pb(self): """Construct a KeyRange protobuf. - :rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeyRange` + :rtype: :class:`~google.cloud.spanner_v1.KeyRange` :returns: protobuf corresponding to this instance. """ kwargs = {} @@ -139,11 +139,11 @@ def __init__(self, keys=(), ranges=(), all_=False): def _to_pb(self): """Construct a KeySet protobuf. - :rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet` + :rtype: :class:`~google.cloud.spanner_v1.KeySet` :returns: protobuf corresponding to this instance. """ if self.all_: - return KeySetPB(all=True) + return KeySetPB(all_=True) kwargs = {} if self.keys: diff --git a/google/cloud/spanner_v1/param_types.py b/google/cloud/spanner_v1/param_types.py index c672d818ca..8ec5ac7ace 100644 --- a/google/cloud/spanner_v1/param_types.py +++ b/google/cloud/spanner_v1/param_types.py @@ -14,30 +14,32 @@ """Types exported from this package.""" -from google.cloud.spanner_v1.proto import type_pb2 +from google.cloud.spanner_v1 import Type +from google.cloud.spanner_v1 import TypeCode +from google.cloud.spanner_v1 import StructType # Scalar parameter types -STRING = type_pb2.Type(code=type_pb2.STRING) -BYTES = type_pb2.Type(code=type_pb2.BYTES) -BOOL = type_pb2.Type(code=type_pb2.BOOL) -INT64 = type_pb2.Type(code=type_pb2.INT64) -FLOAT64 = type_pb2.Type(code=type_pb2.FLOAT64) -DATE = type_pb2.Type(code=type_pb2.DATE) -TIMESTAMP = type_pb2.Type(code=type_pb2.TIMESTAMP) -NUMERIC = type_pb2.Type(code=type_pb2.NUMERIC) +STRING = Type(code=TypeCode.STRING) +BYTES = Type(code=TypeCode.BYTES) +BOOL = Type(code=TypeCode.BOOL) +INT64 = Type(code=TypeCode.INT64) +FLOAT64 = Type(code=TypeCode.FLOAT64) +DATE = Type(code=TypeCode.DATE) +TIMESTAMP = Type(code=TypeCode.TIMESTAMP) +NUMERIC = Type(code=TypeCode.NUMERIC) def Array(element_type): # pylint: disable=invalid-name """Construct an array parameter type description protobuf. 
diff --git a/google/cloud/spanner_v1/param_types.py b/google/cloud/spanner_v1/param_types.py
index c672d818ca..8ec5ac7ace 100644
--- a/google/cloud/spanner_v1/param_types.py
+++ b/google/cloud/spanner_v1/param_types.py
@@ -14,30 +14,32 @@

 """Types exported from this package."""

-from google.cloud.spanner_v1.proto import type_pb2
+from google.cloud.spanner_v1 import Type
+from google.cloud.spanner_v1 import TypeCode
+from google.cloud.spanner_v1 import StructType


 # Scalar parameter types
-STRING = type_pb2.Type(code=type_pb2.STRING)
-BYTES = type_pb2.Type(code=type_pb2.BYTES)
-BOOL = type_pb2.Type(code=type_pb2.BOOL)
-INT64 = type_pb2.Type(code=type_pb2.INT64)
-FLOAT64 = type_pb2.Type(code=type_pb2.FLOAT64)
-DATE = type_pb2.Type(code=type_pb2.DATE)
-TIMESTAMP = type_pb2.Type(code=type_pb2.TIMESTAMP)
-NUMERIC = type_pb2.Type(code=type_pb2.NUMERIC)
+STRING = Type(code=TypeCode.STRING)
+BYTES = Type(code=TypeCode.BYTES)
+BOOL = Type(code=TypeCode.BOOL)
+INT64 = Type(code=TypeCode.INT64)
+FLOAT64 = Type(code=TypeCode.FLOAT64)
+DATE = Type(code=TypeCode.DATE)
+TIMESTAMP = Type(code=TypeCode.TIMESTAMP)
+NUMERIC = Type(code=TypeCode.NUMERIC)


 def Array(element_type):  # pylint: disable=invalid-name
     """Construct an array parameter type description protobuf.

-    :type element_type: :class:`type_pb2.Type`
+    :type element_type: :class:`~google.cloud.spanner_v1.Type`
     :param element_type: the type of elements of the array

-    :rtype: :class:`type_pb2.Type`
+    :rtype: :class:`google.cloud.spanner_v1.Type`
     :returns: the appropriate array-type protobuf
     """
-    return type_pb2.Type(code=type_pb2.ARRAY, array_element_type=element_type)
+    return Type(code=TypeCode.ARRAY, array_element_type=element_type)


 def StructField(name, field_type):  # pylint: disable=invalid-name
@@ -46,24 +48,22 @@ def StructField(name, field_type):  # pylint: disable=invalid-name
     :type name: str
     :param name: the name of the field

-    :type field_type: :class:`type_pb2.Type`
+    :type field_type: :class:`google.cloud.spanner_v1.Type`
     :param field_type: the type of the field

-    :rtype: :class:`type_pb2.StructType.Field`
+    :rtype: :class:`google.cloud.spanner_v1.StructType.Field`
     :returns: the appropriate struct-field-type protobuf
     """
-    return type_pb2.StructType.Field(name=name, type=field_type)
+    return StructType.Field(name=name, type_=field_type)


 def Struct(fields):  # pylint: disable=invalid-name
     """Construct a struct parameter type description protobuf.

-    :type fields: list of :class:`type_pb2.StructType.Field`
+    :type fields: list of :class:`google.cloud.spanner_v1.StructType.Field`
     :param fields: the fields of the struct

-    :rtype: :class:`type_pb2.Type`
+    :rtype: :class:`google.cloud.spanner_v1.Type`
     :returns: the appropriate struct-type protobuf
     """
-    return type_pb2.Type(
-        code=type_pb2.STRUCT, struct_type=type_pb2.StructType(fields=fields)
-    )
+    return Type(code=TypeCode.STRUCT, struct_type=StructType(fields=fields))
diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index 2c056fc820..112c277c86 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -171,7 +171,9 @@ def bind(self, database):

         while not self._sessions.full():
             resp = api.batch_create_sessions(
-                database.name, self.size - self._sessions.qsize(), metadata=metadata
+                database=database.name,
+                session_count=self.size - self._sessions.qsize(),
+                metadata=metadata,
             )
             for session_pb in resp.session:
                 session = self._new_session()
@@ -362,7 +364,9 @@ def bind(self, database):

         while created_session_count < self.size:
             resp = api.batch_create_sessions(
-                database.name, self.size - created_session_count, metadata=metadata
+                database=database.name,
+                session_count=self.size - created_session_count,
+                metadata=metadata,
             )
             for session_pb in resp.session:
                 session = self._new_session()
@@ -520,7 +524,7 @@ class SessionCheckout(object):
     """Context manager: hold session checked out from a pool.

     :type pool: concrete subclass of
-        :class:`~google.cloud.spanner_v1.session.AbstractSessionPool`
+        :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`
     :param pool: Pool from which to check out a session.

     :param kwargs: extra keyword arguments to be passed to :meth:`pool.get`.
diff --git a/google/cloud/spanner_v1/proto/__init__.py b/google/cloud/spanner_v1/proto/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/google/cloud/spanner_v1/proto/keys_pb2.py b/google/cloud/spanner_v1/proto/keys_pb2.py
deleted file mode 100644
index 8481775d4b..0000000000
--- a/google/cloud/spanner_v1/proto/keys_pb2.py
+++ /dev/null
@@ -1,381 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
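The updated ``param_types`` helpers above keep their public shape, so typed query parameters read as before; only the message classes underneath changed. A short, self-contained sketch (table and parameter names are invented)::

    from google.cloud.spanner_v1 import param_types

    # Scalars are proto-plus ``Type`` messages now, but are used as before.
    params = {"prefix": "B", "max_age": 30}
    param_type_map = {"prefix": param_types.STRING, "max_age": param_types.INT64}

    # Composite helpers compose the same way; ``StructField`` now passes the
    # proto-plus ``type_`` kwarg internally.
    name_type = param_types.Struct(
        [
            param_types.StructField("first", param_types.STRING),
            param_types.StructField("last", param_types.STRING),
        ]
    )
    tags_type = param_types.Array(param_types.STRING)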
-# source: google/cloud/spanner_v1/proto/keys.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/keys.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\tKeysProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n(google/cloud/spanner_v1/proto/keys.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\xf4\x01\n\x08KeyRange\x12\x32\n\x0cstart_closed\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\x30\n\nstart_open\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\x30\n\nend_closed\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x01\x12.\n\x08\x65nd_open\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x01\x42\x10\n\x0estart_key_typeB\x0e\n\x0c\x65nd_key_type"l\n\x06KeySet\x12(\n\x04keys\x18\x01 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x12+\n\x06ranges\x18\x02 \x03(\x0b\x32\x1b.google.spanner.v1.KeyRange\x12\x0b\n\x03\x61ll\x18\x03 \x01(\x08\x42\xaf\x01\n\x15\x63om.google.spanner.v1B\tKeysProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_KEYRANGE = _descriptor.Descriptor( - name="KeyRange", - full_name="google.spanner.v1.KeyRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_closed", - full_name="google.spanner.v1.KeyRange.start_closed", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_open", - full_name="google.spanner.v1.KeyRange.start_open", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_closed", - full_name="google.spanner.v1.KeyRange.end_closed", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="end_open", - full_name="google.spanner.v1.KeyRange.end_open", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key_type", - full_name="google.spanner.v1.KeyRange.start_key_type", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key_type", - full_name="google.spanner.v1.KeyRange.end_key_type", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=124, - serialized_end=368, -) - - -_KEYSET = _descriptor.Descriptor( - name="KeySet", - full_name="google.spanner.v1.KeySet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="keys", - full_name="google.spanner.v1.KeySet.keys", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ranges", - full_name="google.spanner.v1.KeySet.ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="all", - full_name="google.spanner.v1.KeySet.all", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=370, - serialized_end=478, -) - -_KEYRANGE.fields_by_name[ - "start_closed" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "start_open" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "end_closed" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "end_open" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.oneofs_by_name["start_key_type"].fields.append( - _KEYRANGE.fields_by_name["start_closed"] -) -_KEYRANGE.fields_by_name["start_closed"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "start_key_type" -] -_KEYRANGE.oneofs_by_name["start_key_type"].fields.append( - _KEYRANGE.fields_by_name["start_open"] -) -_KEYRANGE.fields_by_name["start_open"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "start_key_type" -] 
-_KEYRANGE.oneofs_by_name["end_key_type"].fields.append( - _KEYRANGE.fields_by_name["end_closed"] -) -_KEYRANGE.fields_by_name["end_closed"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "end_key_type" -] -_KEYRANGE.oneofs_by_name["end_key_type"].fields.append( - _KEYRANGE.fields_by_name["end_open"] -) -_KEYRANGE.fields_by_name["end_open"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "end_key_type" -] -_KEYSET.fields_by_name[ - "keys" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYSET.fields_by_name["ranges"].message_type = _KEYRANGE -DESCRIPTOR.message_types_by_name["KeyRange"] = _KEYRANGE -DESCRIPTOR.message_types_by_name["KeySet"] = _KEYSET -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -KeyRange = _reflection.GeneratedProtocolMessageType( - "KeyRange", - (_message.Message,), - { - "DESCRIPTOR": _KEYRANGE, - "__module__": "google.cloud.spanner_v1.proto.keys_pb2", - "__doc__": """KeyRange represents a range of rows in a table or index. A range has - a start key and an end key. These keys can be open or closed, - indicating if the range includes rows with that key. Keys are - represented by lists, where the ith value in the list corresponds to - the ith component of the table or index primary key. Individual values - are encoded as described [here][google.spanner.v1.TypeCode]. For - example, consider the following table definition: :: CREATE TABLE - UserEvents ( UserName STRING(MAX), EventDate STRING(10) ) - PRIMARY KEY(UserName, EventDate); The following keys name rows in - this table: :: ["Bob", "2014-09-23"] ["Alfred", "2015-06-12"] - Since the ``UserEvents`` table’s ``PRIMARY KEY`` clause names two - columns, each ``UserEvents`` key has two elements; the first is the - ``UserName``, and the second is the ``EventDate``. Key ranges with - multiple components are interpreted lexicographically by component - using the table or index key’s declared sort order. For example, the - following range returns all events for user ``"Bob"`` that occurred in - the year 2015: :: "start_closed": ["Bob", "2015-01-01"] - "end_closed": ["Bob", "2015-12-31"] Start and end keys can omit - trailing key components. This affects the inclusion and exclusion of - rows that exactly match the provided key components: if the key is - closed, then rows that exactly match the provided components are - included; if the key is open, then rows that exactly match are not - included. For example, the following range includes all events for - ``"Bob"`` that occurred during and after the year 2000: :: - "start_closed": ["Bob", "2000-01-01"] "end_closed": ["Bob"] The - next example retrieves all events for ``"Bob"``: :: - "start_closed": ["Bob"] "end_closed": ["Bob"] To retrieve events - before the year 2000: :: "start_closed": ["Bob"] "end_open": - ["Bob", "2000-01-01"] The following range includes all rows in the - table: :: "start_closed": [] "end_closed": [] This range - returns all users whose ``UserName`` begins with any character from A - to C: :: "start_closed": ["A"] "end_open": ["D"] This range - returns all users whose ``UserName`` begins with B: :: - "start_closed": ["B"] "end_open": ["C"] Key ranges honor column - sort order. For example, suppose a table is defined as follows: :: - CREATE TABLE DescendingSortedTable { Key INT64, ... 
) - PRIMARY KEY(Key DESC); The following range retrieves all rows with - key values between 1 and 100 inclusive: :: "start_closed": - ["100"] "end_closed": ["1"] Note that 100 is passed as the start, - and 1 is passed as the end, because ``Key`` is a descending column in - the schema. - - Attributes: - start_key_type: - The start key must be provided. It can be either closed or - open. - start_closed: - If the start is closed, then the range includes all rows whose - first ``len(start_closed)`` key columns exactly match - ``start_closed``. - start_open: - If the start is open, then the range excludes rows whose first - ``len(start_open)`` key columns exactly match ``start_open``. - end_key_type: - The end key must be provided. It can be either closed or open. - end_closed: - If the end is closed, then the range includes all rows whose - first ``len(end_closed)`` key columns exactly match - ``end_closed``. - end_open: - If the end is open, then the range excludes rows whose first - ``len(end_open)`` key columns exactly match ``end_open``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRange) - }, -) -_sym_db.RegisterMessage(KeyRange) - -KeySet = _reflection.GeneratedProtocolMessageType( - "KeySet", - (_message.Message,), - { - "DESCRIPTOR": _KEYSET, - "__module__": "google.cloud.spanner_v1.proto.keys_pb2", - "__doc__": """``KeySet`` defines a collection of Cloud Spanner keys and/or key - ranges. All the keys are expected to be in the same table or index. - The keys need not be sorted in any particular way. If the same key is - specified multiple times in the set (for example if two ranges, two - keys, or a key and a range overlap), Cloud Spanner behaves as if the - key were only specified once. - - Attributes: - keys: - A list of specific keys. Entries in ``keys`` should have - exactly as many elements as there are columns in the primary - or index key with which this ``KeySet`` is used. Individual - key values are encoded as described - [here][google.spanner.v1.TypeCode]. - ranges: - A list of key ranges. See - [KeyRange][google.spanner.v1.KeyRange] for more information - about key range specifications. - all: - For convenience ``all`` can be set to ``true`` to indicate - that this ``KeySet`` matches all keys in the table or index. - Note that any keys specified in ``keys`` or ``ranges`` are - only yielded once. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.KeySet) - }, -) -_sym_db.RegisterMessage(KeySet) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/keys_pb2_grpc.py b/google/cloud/spanner_v1/proto/keys_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/keys_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/proto/mutation_pb2.py b/google/cloud/spanner_v1/proto/mutation_pb2.py deleted file mode 100644 index 4719d77a50..0000000000 --- a/google/cloud/spanner_v1/proto/mutation_pb2.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
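The ``KeyRange``/``KeySet`` semantics documented in the deleted ``keys_pb2`` module above are unchanged in v2; the handwritten wrappers in ``keyset.py`` remain the supported way to express them. For example, the old docstring's "all events for user ``"Bob"`` during 2015" range translates directly (a sketch using the wrapper classes)::

    from google.cloud.spanner_v1.keyset import KeyRange, KeySet

    # Per the deleted docstring: all events for user "Bob" during 2015.
    bob_2015 = KeyRange(
        start_closed=["Bob", "2015-01-01"],
        end_closed=["Bob", "2015-12-31"],
    )
    keyset = KeySet(ranges=[bob_2015])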
-# source: google/cloud/spanner_v1/proto/mutation.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.cloud.spanner_v1.proto import ( - keys_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/mutation.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\rMutationProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n,google/cloud/spanner_v1/proto/mutation.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a(google/cloud/spanner_v1/proto/keys.proto\x1a\x1cgoogle/api/annotations.proto"\xc6\x03\n\x08Mutation\x12\x33\n\x06insert\x18\x01 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x33\n\x06update\x18\x02 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12=\n\x10insert_or_update\x18\x03 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x34\n\x07replace\x18\x04 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x34\n\x06\x64\x65lete\x18\x05 \x01(\x0b\x32".google.spanner.v1.Mutation.DeleteH\x00\x1aS\n\x05Write\x12\r\n\x05table\x18\x01 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x02 \x03(\t\x12*\n\x06values\x18\x03 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x1a\x43\n\x06\x44\x65lete\x12\r\n\x05table\x18\x01 \x01(\t\x12*\n\x07key_set\x18\x02 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x0b\n\toperationB\xb3\x01\n\x15\x63om.google.spanner.v1B\rMutationProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_MUTATION_WRITE = _descriptor.Descriptor( - name="Write", - full_name="google.spanner.v1.Mutation.Write", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table", - full_name="google.spanner.v1.Mutation.Write.table", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.spanner.v1.Mutation.Write.columns", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="values", - full_name="google.spanner.v1.Mutation.Write.values", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=542, -) - -_MUTATION_DELETE = _descriptor.Descriptor( - name="Delete", - full_name="google.spanner.v1.Mutation.Delete", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table", - full_name="google.spanner.v1.Mutation.Delete.table", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="key_set", - full_name="google.spanner.v1.Mutation.Delete.key_set", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=544, - serialized_end=611, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.spanner.v1.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="insert", - full_name="google.spanner.v1.Mutation.insert", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.spanner.v1.Mutation.update", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="insert_or_update", - full_name="google.spanner.v1.Mutation.insert_or_update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="replace", - full_name="google.spanner.v1.Mutation.replace", - index=3, - number=4, - type=11, - cpp_type=10, - 
label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="google.spanner.v1.Mutation.delete", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MUTATION_WRITE, _MUTATION_DELETE], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="operation", - full_name="google.spanner.v1.Mutation.operation", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=170, - serialized_end=624, -) - -_MUTATION_WRITE.fields_by_name[ - "values" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_MUTATION_WRITE.containing_type = _MUTATION -_MUTATION_DELETE.fields_by_name[ - "key_set" -].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2._KEYSET -_MUTATION_DELETE.containing_type = _MUTATION -_MUTATION.fields_by_name["insert"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["update"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["insert_or_update"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["replace"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["delete"].message_type = _MUTATION_DELETE -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["insert"]) -_MUTATION.fields_by_name["insert"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["update"]) -_MUTATION.fields_by_name["update"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append( - _MUTATION.fields_by_name["insert_or_update"] -) -_MUTATION.fields_by_name[ - "insert_or_update" -].containing_oneof = _MUTATION.oneofs_by_name["operation"] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["replace"]) -_MUTATION.fields_by_name["replace"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["delete"]) -_MUTATION.fields_by_name["delete"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - { - "Write": _reflection.GeneratedProtocolMessageType( - "Write", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_WRITE, - "__module__": "google.cloud.spanner_v1.proto.mutation_pb2", - "__doc__": """Arguments to [insert][google.spanner.v1.Mutation.insert], - [update][google.spanner.v1.Mutation.update], - [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and - [replace][google.spanner.v1.Mutation.replace] operations. - - Attributes: - table: - Required. The table whose rows will be written. 
- columns: - The names of the columns in - [table][google.spanner.v1.Mutation.Write.table] to be written. - The list of columns must contain enough columns to allow Cloud - Spanner to derive values for all primary key columns in the - row(s) to be modified. - values: - The values to be written. ``values`` can contain more than one - list of values. If it does, then multiple rows are written, - one for each entry in ``values``. Each list in ``values`` must - have exactly as many entries as there are entries in - [columns][google.spanner.v1.Mutation.Write.columns] above. - Sending multiple lists is equivalent to sending multiple - ``Mutation``\ s, each containing one ``values`` entry and - repeating [table][google.spanner.v1.Mutation.Write.table] and - [columns][google.spanner.v1.Mutation.Write.columns]. - Individual values in each list are encoded as described - [here][google.spanner.v1.TypeCode]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Write) - }, - ), - "Delete": _reflection.GeneratedProtocolMessageType( - "Delete", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETE, - "__module__": "google.cloud.spanner_v1.proto.mutation_pb2", - "__doc__": """Arguments to [delete][google.spanner.v1.Mutation.delete] operations. - - Attributes: - table: - Required. The table whose rows will be deleted. - key_set: - Required. The primary keys of the rows within - [table][google.spanner.v1.Mutation.Delete.table] to delete. - The primary keys must be specified in the order in which they - appear in the ``PRIMARY KEY()`` clause of the table’s - equivalent DDL statement (the DDL statement used to create the - table). Delete is idempotent. The transaction will succeed - even if some or all rows do not exist. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Delete) - }, - ), - "DESCRIPTOR": _MUTATION, - "__module__": "google.cloud.spanner_v1.proto.mutation_pb2", - "__doc__": """A modification to one or more Cloud Spanner rows. Mutations can be - applied to a Cloud Spanner database by sending them in a - [Commit][google.spanner.v1.Spanner.Commit] call. - - Attributes: - operation: - Required. The operation to perform. - insert: - Insert new rows in a table. If any of the rows already exist, - the write or transaction fails with error ``ALREADY_EXISTS``. - update: - Update existing rows in a table. If any of the rows does not - already exist, the transaction fails with error ``NOT_FOUND``. - insert_or_update: - Like [insert][google.spanner.v1.Mutation.insert], except that - if the row already exists, then its column values are - overwritten with the ones provided. Any column values not - explicitly written are preserved. When using [insert_or_updat - e][google.spanner.v1.Mutation.insert_or_update], just as when - using [insert][google.spanner.v1.Mutation.insert], all ``NOT - NULL`` columns in the table must be given a value. This holds - true even when the row already exists and will therefore - actually be updated. - replace: - Like [insert][google.spanner.v1.Mutation.insert], except that - if the row already exists, it is deleted, and the column - values provided are inserted instead. Unlike [insert_or_update - ][google.spanner.v1.Mutation.insert_or_update], this means any - values not explicitly written become ``NULL``. In an - interleaved table, if you create the child table with the ``ON - DELETE CASCADE`` annotation, then replacing a parent row also - deletes the child rows. 
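Likewise, the mutation semantics spelled out here survive the module's deletion: applications keep composing mutations through the handwritten ``Batch`` helpers, which now build proto-plus ``Mutation`` messages internally. A sketch reusing the docstring's ``UserEvents`` table (the database ID is a placeholder, and ``instance`` is assumed from the earlier sketch)::

    from google.cloud.spanner_v1.keyset import KeySet

    database = instance.database("my-database")  # placeholder database ID

    with database.batch() as batch:
        # insert_or_update: overwrites the listed columns if the row exists.
        batch.insert_or_update(
            table="UserEvents",
            columns=("UserName", "EventDate"),
            values=[("Bob", "2015-06-12")],
        )
        # delete: idempotent; succeeds whether or not the row exists.
        batch.delete("UserEvents", keyset=KeySet(keys=[["Alfred", "2015-06-12"]]))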
Otherwise, you must delete the child - rows before you replace the parent row. - delete: - Delete rows from a table. Succeeds whether or not the named - rows were present. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation) - }, -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.Write) -_sym_db.RegisterMessage(Mutation.Delete) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py b/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/proto/query_plan_pb2.py b/google/cloud/spanner_v1/proto/query_plan_pb2.py deleted file mode 100644 index 747fe73e93..0000000000 --- a/google/cloud/spanner_v1/proto/query_plan_pb2.py +++ /dev/null @@ -1,623 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/spanner_v1/proto/query_plan.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/query_plan.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\016QueryPlanProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n.google/cloud/spanner_v1/proto/query_plan.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\xf8\x04\n\x08PlanNode\x12\r\n\x05index\x18\x01 \x01(\x05\x12.\n\x04kind\x18\x02 \x01(\x0e\x32 .google.spanner.v1.PlanNode.Kind\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\x0b\x63hild_links\x18\x04 \x03(\x0b\x32%.google.spanner.v1.PlanNode.ChildLink\x12M\n\x14short_representation\x18\x05 \x01(\x0b\x32/.google.spanner.v1.PlanNode.ShortRepresentation\x12)\n\x08metadata\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\x0f\x65xecution_stats\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x1a@\n\tChildLink\x12\x13\n\x0b\x63hild_index\x18\x01 \x01(\x05\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08variable\x18\x03 \x01(\t\x1a\xb2\x01\n\x13ShortRepresentation\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12S\n\nsubqueries\x18\x02 \x03(\x0b\x32?.google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry\x1a\x31\n\x0fSubqueriesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01"8\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\x0e\n\nRELATIONAL\x10\x01\x12\n\n\x06SCALAR\x10\x02"<\n\tQueryPlan\x12/\n\nplan_nodes\x18\x01 
\x03(\x0b\x32\x1b.google.spanner.v1.PlanNodeB\xb4\x01\n\x15\x63om.google.spanner.v1B\x0eQueryPlanProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_PLANNODE_KIND = _descriptor.EnumDescriptor( - name="Kind", - full_name="google.spanner.v1.PlanNode.Kind", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="KIND_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RELATIONAL", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SCALAR", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=706, - serialized_end=762, -) -_sym_db.RegisterEnumDescriptor(_PLANNODE_KIND) - - -_PLANNODE_CHILDLINK = _descriptor.Descriptor( - name="ChildLink", - full_name="google.spanner.v1.PlanNode.ChildLink", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="child_index", - full_name="google.spanner.v1.PlanNode.ChildLink.child_index", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.v1.PlanNode.ChildLink.type", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="variable", - full_name="google.spanner.v1.PlanNode.ChildLink.variable", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=523, -) - -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY = _descriptor.Descriptor( - name="SubqueriesEntry", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.value", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=655, - serialized_end=704, -) - -_PLANNODE_SHORTREPRESENTATION = _descriptor.Descriptor( - name="ShortRepresentation", - full_name="google.spanner.v1.PlanNode.ShortRepresentation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="description", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.description", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="subqueries", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.subqueries", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=526, - serialized_end=704, -) - -_PLANNODE = _descriptor.Descriptor( - name="PlanNode", - full_name="google.spanner.v1.PlanNode", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.spanner.v1.PlanNode.index", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="kind", - full_name="google.spanner.v1.PlanNode.kind", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.v1.PlanNode.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="child_links", - full_name="google.spanner.v1.PlanNode.child_links", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="short_representation", - full_name="google.spanner.v1.PlanNode.short_representation", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.PlanNode.metadata", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="execution_stats", - full_name="google.spanner.v1.PlanNode.execution_stats", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PLANNODE_CHILDLINK, _PLANNODE_SHORTREPRESENTATION], - enum_types=[_PLANNODE_KIND], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=130, - serialized_end=762, -) - - -_QUERYPLAN = _descriptor.Descriptor( - name="QueryPlan", - full_name="google.spanner.v1.QueryPlan", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="plan_nodes", - full_name="google.spanner.v1.QueryPlan.plan_nodes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=764, - serialized_end=824, -) - -_PLANNODE_CHILDLINK.containing_type = _PLANNODE -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY.containing_type = ( - _PLANNODE_SHORTREPRESENTATION -) -_PLANNODE_SHORTREPRESENTATION.fields_by_name[ - "subqueries" -].message_type = _PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY -_PLANNODE_SHORTREPRESENTATION.containing_type = _PLANNODE -_PLANNODE.fields_by_name["kind"].enum_type = _PLANNODE_KIND -_PLANNODE.fields_by_name["child_links"].message_type = 
_PLANNODE_CHILDLINK -_PLANNODE.fields_by_name[ - "short_representation" -].message_type = _PLANNODE_SHORTREPRESENTATION -_PLANNODE.fields_by_name[ - "metadata" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_PLANNODE.fields_by_name[ - "execution_stats" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_PLANNODE_KIND.containing_type = _PLANNODE -_QUERYPLAN.fields_by_name["plan_nodes"].message_type = _PLANNODE -DESCRIPTOR.message_types_by_name["PlanNode"] = _PLANNODE -DESCRIPTOR.message_types_by_name["QueryPlan"] = _QUERYPLAN -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -PlanNode = _reflection.GeneratedProtocolMessageType( - "PlanNode", - (_message.Message,), - { - "ChildLink": _reflection.GeneratedProtocolMessageType( - "ChildLink", - (_message.Message,), - { - "DESCRIPTOR": _PLANNODE_CHILDLINK, - "__module__": "google.cloud.spanner_v1.proto.query_plan_pb2", - "__doc__": """Metadata associated with a parent-child relationship appearing in a - [PlanNode][google.spanner.v1.PlanNode]. - - Attributes: - child_index: - The node to which the link points. - type: - The type of the link. For example, in Hash Joins this could be - used to distinguish between the build child and the probe - child, or in the case of the child being an output variable, - to represent the tag associated with the output variable. - variable: - Only present if the child node is - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and - corresponds to an output variable of the parent node. The - field carries the name of the output variable. For example, a - ``TableScan`` operator that reads rows from a table will have - child links to the ``SCALAR`` nodes representing the output - variables created for each column that is read by the - operator. The corresponding ``variable`` fields will be set to - the variable names assigned to the columns. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ChildLink) - }, - ), - "ShortRepresentation": _reflection.GeneratedProtocolMessageType( - "ShortRepresentation", - (_message.Message,), - { - "SubqueriesEntry": _reflection.GeneratedProtocolMessageType( - "SubqueriesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY, - "__module__": "google.cloud.spanner_v1.proto.query_plan_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry) - }, - ), - "DESCRIPTOR": _PLANNODE_SHORTREPRESENTATION, - "__module__": "google.cloud.spanner_v1.proto.query_plan_pb2", - "__doc__": """Condensed representation of a node and its subtree. Only present for - ``SCALAR`` [PlanNode(s)][google.spanner.v1.PlanNode]. - - Attributes: - description: - A string representation of the expression subtree rooted at - this node. - subqueries: - A mapping of (subquery variable name) -> (subquery node id) - for cases where the ``description`` string of this node - references a ``SCALAR`` subquery contained in the expression - subtree rooted at this node. The referenced ``SCALAR`` - subquery may not necessarily be a direct child of this node. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation) - }, - ), - "DESCRIPTOR": _PLANNODE, - "__module__": "google.cloud.spanner_v1.proto.query_plan_pb2", - "__doc__": """Node information for nodes appearing in a - [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. - - Attributes: - index: - The ``PlanNode``\ ’s index in [node - list][google.spanner.v1.QueryPlan.plan_nodes]. 
- kind: - Used to determine the type of node. May be needed for - visualizing different kinds of nodes differently. For example, - If the node is a - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will - have a condensed representation which can be used to directly - embed a description of the node in its parent. - display_name: - The display name for the node. - child_links: - List of child node ``index``\ es and their relationship to - this parent. - short_representation: - Condensed representation for - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. - metadata: - Attributes relevant to the node contained in a group of key- - value pairs. For example, a Parameter Reference node could - have the following information in its metadata: :: { - "parameter_reference": "param1", "parameter_type": - "array" } - execution_stats: - The execution statistics associated with the node, contained - in a group of key-value pairs. Only present if the plan was - returned as a result of a profile query. For example, number - of executions, number of rows/time per execution etc. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode) - }, -) -_sym_db.RegisterMessage(PlanNode) -_sym_db.RegisterMessage(PlanNode.ChildLink) -_sym_db.RegisterMessage(PlanNode.ShortRepresentation) -_sym_db.RegisterMessage(PlanNode.ShortRepresentation.SubqueriesEntry) - -QueryPlan = _reflection.GeneratedProtocolMessageType( - "QueryPlan", - (_message.Message,), - { - "DESCRIPTOR": _QUERYPLAN, - "__module__": "google.cloud.spanner_v1.proto.query_plan_pb2", - "__doc__": """Contains an ordered list of nodes appearing in the query plan. - - Attributes: - plan_nodes: - The nodes in the query plan. Plan nodes are returned in pre- - order starting with the plan root. Each - [PlanNode][google.spanner.v1.PlanNode]’s ``id`` corresponds to - its index in ``plan_nodes``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.QueryPlan) - }, -) -_sym_db.RegisterMessage(QueryPlan) - - -DESCRIPTOR._options = None -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py b/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/proto/result_set_pb2.py b/google/cloud/spanner_v1/proto/result_set_pb2.py deleted file mode 100644 index d9d53e3659..0000000000 --- a/google/cloud/spanner_v1/proto/result_set_pb2.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
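Query plans also remain reachable through the handwritten surface: ``Snapshot.execute_sql`` accepts a ``query_mode``, and the streamed result set exposes the plan via its ``stats`` once the stream is drained. A sketch, assuming ``ExecuteSqlRequest`` is re-exported by the new ``spanner_v1`` package and reusing ``database`` from the batch sketch above::

    from google.cloud.spanner_v1 import ExecuteSqlRequest

    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT 1",
            query_mode=ExecuteSqlRequest.QueryMode.PROFILE,
        )
        rows = list(results)  # drain the stream so ``stats`` is populated
        for node in results.stats.query_plan.plan_nodes:
            print(node.index, node.display_name)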
-# source: google/cloud/spanner_v1/proto/result_set.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.cloud.spanner_v1.proto import ( - query_plan_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.cloud.spanner_v1.proto import ( - type_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/result_set.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\016ResultSetProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\370\001\001\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n.google/cloud/spanner_v1/proto/result_set.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a.google/cloud/spanner_v1/proto/query_plan.proto\x1a/google/cloud/spanner_v1/proto/transaction.proto\x1a(google/cloud/spanner_v1/proto/type.proto\x1a\x1cgoogle/api/annotations.proto"\x9f\x01\n\tResultSet\x12\x36\n\x08metadata\x18\x01 \x01(\x0b\x32$.google.spanner.v1.ResultSetMetadata\x12(\n\x04rows\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x12\x30\n\x05stats\x18\x03 \x01(\x0b\x32!.google.spanner.v1.ResultSetStats"\xd1\x01\n\x10PartialResultSet\x12\x36\n\x08metadata\x18\x01 \x01(\x0b\x32$.google.spanner.v1.ResultSetMetadata\x12&\n\x06values\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\x12\x15\n\rchunked_value\x18\x03 \x01(\x08\x12\x14\n\x0cresume_token\x18\x04 \x01(\x0c\x12\x30\n\x05stats\x18\x05 \x01(\x0b\x32!.google.spanner.v1.ResultSetStats"y\n\x11ResultSetMetadata\x12/\n\x08row_type\x18\x01 \x01(\x0b\x32\x1d.google.spanner.v1.StructType\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xb9\x01\n\x0eResultSetStats\x12\x30\n\nquery_plan\x18\x01 \x01(\x0b\x32\x1c.google.spanner.v1.QueryPlan\x12,\n\x0bquery_stats\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x19\n\x0frow_count_exact\x18\x03 \x01(\x03H\x00\x12\x1f\n\x15row_count_lower_bound\x18\x04 \x01(\x03H\x00\x42\x0b\n\trow_countB\xb7\x01\n\x15\x63om.google.spanner.v1B\x0eResultSetProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xf8\x01\x01\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_RESULTSET = _descriptor.Descriptor( - name="ResultSet", - full_name="google.spanner.v1.ResultSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.ResultSet.metadata", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.spanner.v1.ResultSet.rows", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stats", - full_name="google.spanner.v1.ResultSet.stats", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=269, - serialized_end=428, -) - - -_PARTIALRESULTSET = _descriptor.Descriptor( - name="PartialResultSet", - full_name="google.spanner.v1.PartialResultSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.PartialResultSet.metadata", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.spanner.v1.PartialResultSet.values", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="chunked_value", - full_name="google.spanner.v1.PartialResultSet.chunked_value", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="resume_token", - full_name="google.spanner.v1.PartialResultSet.resume_token", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stats", - 
full_name="google.spanner.v1.PartialResultSet.stats", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=431, - serialized_end=640, -) - - -_RESULTSETMETADATA = _descriptor.Descriptor( - name="ResultSetMetadata", - full_name="google.spanner.v1.ResultSetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_type", - full_name="google.spanner.v1.ResultSetMetadata.row_type", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.spanner.v1.ResultSetMetadata.transaction", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=642, - serialized_end=763, -) - - -_RESULTSETSTATS = _descriptor.Descriptor( - name="ResultSetStats", - full_name="google.spanner.v1.ResultSetStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_plan", - full_name="google.spanner.v1.ResultSetStats.query_plan", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_stats", - full_name="google.spanner.v1.ResultSetStats.query_stats", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_count_exact", - full_name="google.spanner.v1.ResultSetStats.row_count_exact", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_count_lower_bound", - 
full_name="google.spanner.v1.ResultSetStats.row_count_lower_bound", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_count", - full_name="google.spanner.v1.ResultSetStats.row_count", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=766, - serialized_end=951, -) - -_RESULTSET.fields_by_name["metadata"].message_type = _RESULTSETMETADATA -_RESULTSET.fields_by_name[ - "rows" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_RESULTSET.fields_by_name["stats"].message_type = _RESULTSETSTATS -_PARTIALRESULTSET.fields_by_name["metadata"].message_type = _RESULTSETMETADATA -_PARTIALRESULTSET.fields_by_name[ - "values" -].message_type = google_dot_protobuf_dot_struct__pb2._VALUE -_PARTIALRESULTSET.fields_by_name["stats"].message_type = _RESULTSETSTATS -_RESULTSETMETADATA.fields_by_name[ - "row_type" -].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2._STRUCTTYPE -_RESULTSETMETADATA.fields_by_name[ - "transaction" -].message_type = ( - google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION -) -_RESULTSETSTATS.fields_by_name[ - "query_plan" -].message_type = ( - google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2._QUERYPLAN -) -_RESULTSETSTATS.fields_by_name[ - "query_stats" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_RESULTSETSTATS.oneofs_by_name["row_count"].fields.append( - _RESULTSETSTATS.fields_by_name["row_count_exact"] -) -_RESULTSETSTATS.fields_by_name[ - "row_count_exact" -].containing_oneof = _RESULTSETSTATS.oneofs_by_name["row_count"] -_RESULTSETSTATS.oneofs_by_name["row_count"].fields.append( - _RESULTSETSTATS.fields_by_name["row_count_lower_bound"] -) -_RESULTSETSTATS.fields_by_name[ - "row_count_lower_bound" -].containing_oneof = _RESULTSETSTATS.oneofs_by_name["row_count"] -DESCRIPTOR.message_types_by_name["ResultSet"] = _RESULTSET -DESCRIPTOR.message_types_by_name["PartialResultSet"] = _PARTIALRESULTSET -DESCRIPTOR.message_types_by_name["ResultSetMetadata"] = _RESULTSETMETADATA -DESCRIPTOR.message_types_by_name["ResultSetStats"] = _RESULTSETSTATS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ResultSet = _reflection.GeneratedProtocolMessageType( - "ResultSet", - (_message.Message,), - { - "DESCRIPTOR": _RESULTSET, - "__module__": "google.cloud.spanner_v1.proto.result_set_pb2", - "__doc__": """Results from [Read][google.spanner.v1.Spanner.Read] or - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. - - Attributes: - metadata: - Metadata about the result set, such as row type information. - rows: - Each element in ``rows`` is a row whose format is defined by [ - metadata.row_type][google.spanner.v1.ResultSetMetadata.row_typ - e]. The ith element in each row matches the ith field in [meta - data.row_type][google.spanner.v1.ResultSetMetadata.row_type]. - Elements are encoded based on type as described - [here][google.spanner.v1.TypeCode]. - stats: - Query plan and execution statistics for the SQL statement that - produced this result set. 
These can be requested by setting - [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. - DML statements always produce stats containing - the number of rows modified, unless executed using the - [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] - [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. Other fields may or may - not be populated, based on the - [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSet) - }, -) -_sym_db.RegisterMessage(ResultSet) - -PartialResultSet = _reflection.GeneratedProtocolMessageType( - "PartialResultSet", - (_message.Message,), - { - "DESCRIPTOR": _PARTIALRESULTSET, - "__module__": "google.cloud.spanner_v1.proto.result_set_pb2", - "__doc__": """Partial results from a streaming read or SQL query. Streaming reads - and SQL queries better tolerate large result sets, large rows, and - large values, but are a little trickier to consume. - - Attributes: - metadata: - Metadata about the result set, such as row type information. - Only present in the first response. - values: - A streamed result set consists of a stream of values, which - might be split into many ``PartialResultSet`` messages to - accommodate large rows and/or large values. Every N complete - values defines a row, where N is equal to the number of - entries in - [metadata.row_type.fields][google.spanner.v1.StructType.fields]. - Most values are encoded based on type as - described [here][google.spanner.v1.TypeCode]. It is possible - that the last value in values is “chunked”, meaning that the - rest of the value is sent in subsequent ``PartialResultSet``\ - (s). This is denoted by the - [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] - field. Two or more chunked - values can be merged to form a complete value as follows: - - ``bool/number/null``: cannot be chunked - ``string``: - concatenate the strings - ``list``: concatenate the lists. If - the last element in a list is a ``string``, ``list``, or - ``object``, merge it with the first element in the next - list by applying these rules recursively. - ``object``: - concatenate the (field name, field value) pairs. If a field - name is duplicated, then apply these rules recursively to - merge the field values. Some examples of merging: :: - # Strings are concatenated. "foo", "bar" => "foobar" # - Lists of non-strings are concatenated. [2, 3], [4] => [2, - 3, 4] # Lists are concatenated, but the last and first - elements are merged # because they are strings. ["a", - "b"], ["c", "d"] => ["a", "bc", "d"] # Lists are - concatenated, but the last and first elements are merged # - because they are lists. Recursively, the last and first - elements # of the inner lists are merged because they are - strings. ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", - "cd"], "e"] # Non-overlapping object fields are combined. - {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"} # - Overlapping object fields are merged. {"a": "1"}, {"a": - "2"} => {"a": "12"} # Examples of merging objects - containing lists of strings. {"a": ["1"]}, {"a": ["2"]} => - {"a": ["12"]} For a more complete example, suppose a - streaming SQL query is yielding a result set whose rows - contain a single string field. The following - ``PartialResultSet``\ s might be yielded: :: { - "metadata": { ... } "values": ["Hello", "W"] - "chunked_value": true "resume_token": "Af65..."
} { - "values": ["orl"] "chunked_value": true - "resume_token": "Bqp2..." } { "values": ["d"] - "resume_token": "Zx1B..." } This sequence of - ``PartialResultSet``\ s encodes two rows, one containing the - field value ``"Hello"``, and a second containing the field - value ``"World" = "W" + "orl" + "d"``. - chunked_value: - If true, then the final value in - [values][google.spanner.v1.PartialResultSet.values] is - chunked, and must be combined with more values from subsequent - ``PartialResultSet``\ s to obtain a complete field value. - resume_token: - Streaming calls might be interrupted for a variety of reasons, - such as TCP connection loss. If this occurs, the stream of - results can be resumed by re-sending the original request and - including ``resume_token``. Note that executing any other - transaction in the same session invalidates the token. - stats: - Query plan and execution statistics for the statement that - produced this streaming result set. These can be requested by - setting [ExecuteSqlRequest.query_mode][google.spanner.v1.Execu - teSqlRequest.query_mode] and are sent only once with the last - response in the stream. This field will also be present in the - last response for DML statements. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartialResultSet) - }, -) -_sym_db.RegisterMessage(PartialResultSet) - -ResultSetMetadata = _reflection.GeneratedProtocolMessageType( - "ResultSetMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESULTSETMETADATA, - "__module__": "google.cloud.spanner_v1.proto.result_set_pb2", - "__doc__": """Metadata about a [ResultSet][google.spanner.v1.ResultSet] or - [PartialResultSet][google.spanner.v1.PartialResultSet]. - - Attributes: - row_type: - Indicates the field names and types for the rows in the result - set. For example, a SQL query like ``"SELECT UserId, UserName - FROM Users"`` could return a ``row_type`` value like: :: - "fields": [ { "name": "UserId", "type": { "code": "INT64" - } }, { "name": "UserName", "type": { "code": "STRING" } - }, ] - transaction: - If the read or SQL query began a transaction as a side-effect, - the information about the new transaction is yielded here. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetMetadata) - }, -) -_sym_db.RegisterMessage(ResultSetMetadata) - -ResultSetStats = _reflection.GeneratedProtocolMessageType( - "ResultSetStats", - (_message.Message,), - { - "DESCRIPTOR": _RESULTSETSTATS, - "__module__": "google.cloud.spanner_v1.proto.result_set_pb2", - "__doc__": """Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] - or [PartialResultSet][google.spanner.v1.PartialResultSet]. - - Attributes: - query_plan: - [QueryPlan][google.spanner.v1.QueryPlan] for the query - associated with this result. - query_stats: - Aggregated statistics from the execution of the query. Only - present when the query is profiled. For example, a query could - return the statistics as follows: :: { - "rows_returned": "3", "elapsed_time": "1.22 secs", - "cpu_time": "1.19 secs" } - row_count: - The number of rows modified by the DML statement. - row_count_exact: - Standard DML returns an exact count of rows that were - modified. - row_count_lower_bound: - Partitioned DML does not offer exactly-once semantics, so it - returns a lower bound of the rows modified. 
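The chunk-merging rules above are the trickiest part of consuming a streamed result set. As a sketch only (the real reassembly lives in the handwritten streamed.py, which operates on protobuf Value messages rather than plain Python objects), the rules translate to a small recursion over JSON-decoded values:

def merge_chunks(prev, part):
    # ``string``: concatenate the strings.
    if isinstance(prev, str) and isinstance(part, str):
        return prev + part
    # ``list``: concatenate, merging the boundary elements when the
    # last element is itself a string, list, or object.
    if isinstance(prev, list) and isinstance(part, list):
        if prev and part and isinstance(prev[-1], (str, list, dict)):
            return prev[:-1] + [merge_chunks(prev[-1], part[0])] + part[1:]
        return prev + part
    # ``object``: concatenate (field name, field value) pairs,
    # recursing on duplicated field names.
    if isinstance(prev, dict) and isinstance(part, dict):
        merged = dict(prev)
        for key, value in part.items():
            merged[key] = merge_chunks(merged[key], value) if key in merged else value
        return merged
    raise ValueError("bool/number/null values cannot be chunked")

# The documented examples hold under these rules:
assert merge_chunks(["a", "b"], ["c", "d"]) == ["a", "bc", "d"]
assert merge_chunks({"a": ["1"]}, {"a": ["2"]}) == {"a": ["12"]}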
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetStats) - }, -) -_sym_db.RegisterMessage(ResultSetStats) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py b/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_v1/proto/spanner_database_admin.proto deleted file mode 100644 index 56dbff19e1..0000000000 --- a/google/cloud/spanner_v1/proto/spanner_database_admin.proto +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.spanner.admin.database.v1; - -import "google/api/annotations.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; -option java_multiple_files = true; -option java_outer_classname = "SpannerDatabaseAdminProto"; -option java_package = "com.google.spanner.admin.database.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; - - -// Cloud Spanner Database Admin API -// -// The Cloud Spanner Database Admin API can be used to create, drop, and -// list databases. It also enables updating the schema of pre-existing -// databases. -service DatabaseAdmin { - // Lists Cloud Spanner databases. - rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/instances/*}/databases" - }; - } - - // Creates a new Cloud Spanner database and starts to prepare it for serving. - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track preparation of the database. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - // [response][google.longrunning.Operation.response] field type is - // [Database][google.spanner.admin.database.v1.Database], if successful. - rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/instances/*}/databases" - body: "*" - }; - } - - // Gets the state of a Cloud Spanner database. 
- rpc GetDatabase(GetDatabaseRequest) returns (Database) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*/databases/*}" - }; - } - - // Updates the schema of a Cloud Spanner database by - // creating/altering/dropping tables, columns, indexes, etc. The returned - // [long-running operation][google.longrunning.Operation] will have a name of - // the format `/operations/` and can be used to - // track execution of the schema change(s). The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. - rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - body: "*" - }; - } - - // Drops (aka deletes) a Cloud Spanner database. - rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{database=projects/*/instances/*/databases/*}" - }; - } - - // Returns the schema of a Cloud Spanner database as a list of formatted - // DDL statements. This method does not show pending schema updates, those may - // be queried using the [Operations][google.longrunning.Operations] API. - rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) { - option (google.api.http) = { - get: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - }; - } - - // Sets the access control policy on a database resource. Replaces any - // existing policy. - // - // Authorization requires `spanner.databases.setIamPolicy` permission on - // [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" - body: "*" - }; - } - - // Gets the access control policy for a database resource. Returns an empty - // policy if a database exists but does not have a policy set. - // - // Authorization requires `spanner.databases.getIamPolicy` permission on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified database resource. - // - // Attempting this RPC on a non-existent Cloud Spanner database will result in - // a NOT_FOUND error if the user has `spanner.databases.list` permission on - // the containing Cloud Spanner instance. Otherwise returns an empty set of - // permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" - body: "*" - }; - } -} - -// A Cloud Spanner database. -message Database { - // Indicates the current state of the database. - enum State { - // Not specified. - STATE_UNSPECIFIED = 0; - - // The database is still being created. Operations on the database may fail - // with `FAILED_PRECONDITION` in this state. - CREATING = 1; - - // The database is fully created and ready for use. - READY = 2; - } - - // Required. The name of the database. 
Values are of the form - // `projects//instances//databases/`, - // where `` is as specified in the `CREATE DATABASE` - // statement. This name can be passed to other API methods to - // identify the database. - string name = 1; - - // Output only. The current database state. - State state = 2; -} - -// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesRequest { - // Required. The instance whose databases should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // Number of databases to be returned in the response. If 0 or less, - // defaults to the server's maximum allowed page size. - int32 page_size = 3; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a - // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. - string page_token = 4; -} - -// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesResponse { - // Databases that matched the request. - repeated Database databases = 1; - - // `next_page_token` can be sent in a subsequent - // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more - // of the matching databases. - string next_page_token = 2; -} - -// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseRequest { - // Required. The name of the instance that will serve the new database. - // Values are of the form `projects//instances/`. - string parent = 1; - - // Required. A `CREATE DATABASE` statement, which specifies the ID of the - // new database. The database ID must conform to the regular expression - // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. - // If the database ID is a reserved word or if it contains a hyphen, the - // database ID must be enclosed in backticks (`` ` ``). - string create_statement = 2; - - // An optional list of DDL statements to run inside the newly created - // database. Statements can create tables, indexes, etc. These - // statements execute atomically with the creation of the database: - // if there is an error in any statement, the database is not created. - repeated string extra_statements = 3; -} - -// Metadata type for the operation returned by -// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseMetadata { - // The database being created. - string database = 1; -} - -// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. -message GetDatabaseRequest { - // Required. The name of the requested database. Values are of the form - // `projects//instances//databases/`. - string name = 1; -} - -// Enqueues the given DDL statements to be applied, in order but not -// necessarily all at once, to the database schema at some point (or -// points) in the future. The server checks that the statements -// are executable (syntactically valid, name tables that exist, etc.) -// before enqueueing them, but they may still fail upon -// later execution (e.g., if a statement from another batch of -// statements is applied first and it conflicts in some way, or if -// there is some data-related problem like a `NULL` value in a column to -// which `NOT NULL` would be added). 
If a statement fails, all -// subsequent statements in the batch are automatically cancelled. -// -// Each batch of statements is assigned a name which can be used with -// the [Operations][google.longrunning.Operations] API to monitor -// progress. See the -// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more -// details. -message UpdateDatabaseDdlRequest { - // Required. The database to update. - string database = 1; - - // DDL statements to be applied to the database. - repeated string statements = 2; - - // If empty, the new update request is assigned an - // automatically-generated operation ID. Otherwise, `operation_id` - // is used to construct the name of the resulting - // [Operation][google.longrunning.Operation]. - // - // Specifying an explicit operation ID simplifies determining - // whether the statements were executed in the event that the - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - // `operation_id` fields can be combined to form the - // [name][google.longrunning.Operation.name] of the resulting - // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. - // - // `operation_id` should be unique within the database, and must be - // a valid identifier: `[a-z][a-z0-9_]*`. Note that - // automatically-generated operation IDs always begin with an - // underscore. If the named operation already exists, - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - // `ALREADY_EXISTS`. - string operation_id = 3; -} - -// Metadata type for the operation returned by -// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. -message UpdateDatabaseDdlMetadata { - // The database being modified. - string database = 1; - - // For an update this list contains all the statements. For an - // individual statement, this list contains only that statement. - repeated string statements = 2; - - // Reports the commit timestamps of all statements that have - // succeeded so far, where `commit_timestamps[i]` is the commit - // timestamp for the statement `statements[i]`. - repeated google.protobuf.Timestamp commit_timestamps = 3; -} - -// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. -message DropDatabaseRequest { - // Required. The database to be dropped. - string database = 1; -} - -// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlRequest { - // Required. The database whose schema we wish to get. - string database = 1; -} - -// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlResponse { - // A list of formatted DDL statements defining the schema of the database - // specified in the request. 
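The ``operation_id`` contract just described makes replayed schema changes detectable, because the caller can reconstruct the operation name. A short sketch, reusing the DatabaseAdminClient and placeholder names from the sketch above (the table and index are illustrative):

database = "projects/my-project/instances/my-instance/databases/example-db"

# A caller-chosen operation_id ([a-z][a-z0-9_]*) makes the LRO name
# deterministic: <database>/operations/<operation_id>. Replaying the
# call with the same ID yields ALREADY_EXISTS instead of re-running.
operation = client.update_database_ddl(
    request={
        "database": database,
        "statements": ["CREATE INDEX SingersByName ON Singers(Name)"],
        "operation_id": "add_singers_by_name",
    }
)
assert operation.operation.name == database + "/operations/add_singers_by_name"
operation.result()  # UpdateDatabaseDdl has no response; wait for completion.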
- repeated string statements = 1; -} diff --git a/google/cloud/spanner_v1/proto/spanner_instance_admin.proto b/google/cloud/spanner_v1/proto/spanner_instance_admin.proto deleted file mode 100644 index e960e5428e..0000000000 --- a/google/cloud/spanner_v1/proto/spanner_instance_admin.proto +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.spanner.admin.instance.v1; - -import "google/api/annotations.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance"; -option java_multiple_files = true; -option java_outer_classname = "SpannerInstanceAdminProto"; -option java_package = "com.google.spanner.admin.instance.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1"; - - -// Cloud Spanner Instance Admin API -// -// The Cloud Spanner Instance Admin API can be used to create, delete, -// modify and list instances. Instances are dedicated Cloud Spanner serving -// and storage resources to be used by Cloud Spanner databases. -// -// Each instance has a "configuration", which dictates where the -// serving resources for the Cloud Spanner instance are located (e.g., -// US-central, Europe). Configurations are created by Google based on -// resource availability. -// -// Cloud Spanner billing is based on the instances that exist and their -// sizes. After an instance exists, there are no additional -// per-database or per-operation charges for use of the instance -// (though there may be additional network bandwidth charges). -// Instances offer isolation: problems with databases in one instance -// will not affect other instances. However, within an instance -// databases can affect each other. For example, if one database in an -// instance receives a lot of requests and consumes most of the -// instance resources, fewer resources are available for other -// databases in that instance, and their performance may suffer. -service InstanceAdmin { - // Lists the supported instance configurations for a given project. - rpc ListInstanceConfigs(ListInstanceConfigsRequest) returns (ListInstanceConfigsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instanceConfigs" - }; - } - - // Gets information about a particular instance configuration. - rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instanceConfigs/*}" - }; - } - - // Lists all instances in the given project. 
- rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instances" - }; - } - - // Gets information about a particular instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*}" - }; - } - - // Creates an instance and begins preparing it to begin serving. The - // returned [long-running operation][google.longrunning.Operation] - // can be used to track the progress of preparing the new - // instance. The instance name is assigned by the caller. If the - // named instance already exists, `CreateInstance` returns - // `ALREADY_EXISTS`. - // - // Immediately upon completion of this request: - // - // * The instance is readable via the API, with all requested attributes - // but no allocated resources. Its state is `CREATING`. - // - // Until completion of the returned operation: - // - // * Cancelling the operation renders the instance immediately unreadable - // via the API. - // * The instance can be deleted. - // * All other attempts to modify the instance are rejected. - // - // Upon completion of the returned operation: - // - // * Billing for all successfully-allocated resources begins (some types - // may have lower than the requested levels). - // * Databases can be created in the instance. - // * The instance's allocated resource levels are readable via the API. - // * The instance's state becomes `READY`. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track creation of the instance. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/instances" - body: "*" - }; - } - - // Updates an instance, and begins allocating or releasing resources - // as requested. The returned [long-running - // operation][google.longrunning.Operation] can be used to track the - // progress of updating the instance. If the named instance does not - // exist, returns `NOT_FOUND`. - // - // Immediately upon completion of this request: - // - // * For resource types for which a decrease in the instance's allocation - // has been requested, billing is based on the newly-requested level. - // - // Until completion of the returned operation: - // - // * Cancelling the operation sets its metadata's - // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - // restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, - // after which point it terminates with a `CANCELLED` status. - // * All other attempts to modify the instance are rejected. - // * Reading the instance via the API continues to give the pre-request - // resource levels. - // - // Upon completion of the returned operation: - // - // * Billing begins for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources are available for serving the instance's - // tables. 
- // * The instance's new resource levels are readable via the API. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track the instance modification. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - // - // Authorization requires `spanner.instances.update` permission on - // resource [name][google.spanner.admin.instance.v1.Instance.name]. - rpc UpdateInstance(UpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{instance.name=projects/*/instances/*}" - body: "*" - }; - } - - // Deletes an instance. - // - // Immediately upon completion of the request: - // - // * Billing ceases for all of the instance's reserved resources. - // - // Soon afterward: - // - // * The instance and *all of its databases* immediately and - // irrevocably disappear from the API. All data in the databases - // is permanently deleted. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/instances/*}" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - // - // Authorization requires `spanner.instances.setIamPolicy` on - // [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - // - // Authorization requires `spanner.instances.getIamPolicy` on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - // - // Attempting this RPC on a non-existent Cloud Spanner instance resource will - // result in a NOT_FOUND error if the user has `spanner.instances.list` - // permission on the containing Google Cloud Project. Otherwise returns an - // empty set of permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// A possible configuration for a Cloud Spanner instance. Configurations -// define the geographic placement of nodes and their replication. -message InstanceConfig { - // A unique identifier for the instance configuration. Values - // are of the form - // `projects//instanceConfigs/[a-z][-a-z0-9]*` - string name = 1; - - // The name of this instance configuration as it appears in UIs. - string display_name = 2; -} - -// An isolated set of Cloud Spanner resources on which databases can be hosted. -message Instance { - // Indicates the current state of the instance. - enum State { - // Not specified. 
- STATE_UNSPECIFIED = 0; - - // The instance is still being created. Resources may not be - // available yet, and operations such as database creation may not - // work. - CREATING = 1; - - // The instance is fully created and ready to do work such as - // creating databases. - READY = 2; - } - - // Required. A unique identifier for the instance, which cannot be changed - // after the instance is created. Values are of the form - // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final - // segment of the name must be between 6 and 30 characters in length. - string name = 1; - - // Required. The name of the instance's configuration. Values are of the form - // `projects//instanceConfigs/`. See - // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. - string config = 2; - - // Required. The descriptive name for this instance as it appears in UIs. - // Must be unique per project and between 4 and 30 characters in length. - string display_name = 3; - - // Required. The number of nodes allocated to this instance. This may be zero - // in API responses for instances that are not yet in state `READY`. - // - // See [the documentation](https://cloud.google.com/spanner/docs/instances#node_count) - // for more information about nodes. - int32 node_count = 5; - - // Output only. The current instance state. For - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be - // either omitted or set to `CREATING`. For - // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be - // either omitted or set to `READY`. - State state = 6; - - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. - // * Label values must be between 0 and 63 characters long and must conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given resource. - // - // See https://goo.gl/xmQnxf for more information on and examples of labels. - // - // If you plan to use labels in your own code, please note that additional - // characters may be allowed in the future. And so you are advised to use an - // internal label representation, such as JSON, which doesn't rely upon - // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - map labels = 7; -} - -// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsRequest { - // Required. The name of the project for which a list of supported instance - // configurations is requested. Values are of the form - // `projects/`. - string parent = 1; - - // Number of instance configurations to be returned in the response. 
If 0 or - // less, defaults to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] - // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. - string page_token = 3; -} - -// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsResponse { - // The list of requested instance configurations. - repeated InstanceConfig instance_configs = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to - // fetch more of the matching instance configurations. - string next_page_token = 2; -} - -// The request for -// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. -message GetInstanceConfigRequest { - // Required. The name of the requested instance configuration. Values are of - // the form `projects//instanceConfigs/`. - string name = 1; -} - -// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. -message GetInstanceRequest { - // Required. The name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; -} - -// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceRequest { - // Required. The name of the project in which to create the instance. Values - // are of the form `projects/`. - string parent = 1; - - // Required. The ID of the instance to create. Valid identifiers are of the - // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 6 and 30 characters in - // length. - string instance_id = 2; - - // Required. The instance to create. The name may be omitted, but if - // specified must be `/instances/`. - Instance instance = 3; -} - -// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesRequest { - // Required. The name of the project for which a list of instances is - // requested. Values are of the form `projects/`. - string parent = 1; - - // Number of instances to be returned in the response. If 0 or less, defaults - // to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a - // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. - string page_token = 3; - - // An expression for filtering the results of the request. Filter rules are - // case insensitive. The fields eligible for filtering are: - // - // * `name` - // * `display_name` - // * `labels.key` where key is the name of a label - // - // Some examples of using filters are: - // - // * `name:*` --> The instance has a name. - // * `name:Howl` --> The instance's name contains the string "howl". - // * `name:HOWL` --> Equivalent to above. - // * `NAME:howl` --> Equivalent to above. - // * `labels.env:*` --> The instance has the label "env". - // * `labels.env:dev` --> The instance has the label "env" and the value of - // the label contains the string "dev". 
- // * `name:howl labels.env:dev` --> The instance's name contains "howl" and - // it has the label "env" with its value - // containing "dev". - string filter = 4; -} - -// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more - // of the matching instances. - string next_page_token = 2; -} - -// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceRequest { - // Required. The instance to update, which must always include the instance - // name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. - Instance instance = 1; - - // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. - // The field mask must always be specified; this prevents any future fields in - // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know - // about them. - google.protobuf.FieldMask field_mask = 2; -} - -// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. -message DeleteInstanceRequest { - // Required. The name of the instance to be deleted. Values are of the form - // `projects//instances/` - string name = 1; -} - -// Metadata type for the operation returned by -// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceMetadata { - // The instance being created. - Instance instance = 1; - - // The time at which the - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was - // received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} - -// Metadata type for the operation returned by -// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceMetadata { - // The desired end state of the update. - Instance instance = 1; - - // The time at which [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] - // request was received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} diff --git a/google/cloud/spanner_v1/proto/spanner_pb2.py b/google/cloud/spanner_v1/proto/spanner_pb2.py deleted file mode 100644 index a48a12ca59..0000000000 --- a/google/cloud/spanner_v1/proto/spanner_pb2.py +++ /dev/null @@ -1,3437 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
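Tying together the filter syntax and the field-mask requirement documented above, a brief instance-admin sketch (again with placeholder names; the filter and node count are illustrative):

from google.cloud import spanner_admin_instance_v1
from google.protobuf import field_mask_pb2

instance_client = spanner_admin_instance_v1.InstanceAdminClient()

# Filters are case insensitive; this matches instances carrying the
# label "env" with a value containing "dev".
request = spanner_admin_instance_v1.ListInstancesRequest(
    parent="projects/my-project", filter="labels.env:dev"
)
for instance in instance_client.list_instances(request=request):
    print(instance.name, instance.node_count)

# field_mask must always be set; only the listed fields are touched,
# which protects future Instance fields from accidental erasure.
operation = instance_client.update_instance(
    instance=spanner_admin_instance_v1.Instance(
        name="projects/my-project/instances/my-instance", node_count=3
    ),
    field_mask=field_mask_pb2.FieldMask(paths=["node_count"]),
)
operation.result()  # block until resource reallocation completes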
-# source: google/cloud/spanner_v1/proto/spanner.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 -from google.cloud.spanner_v1.proto import ( - keys_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2, -) -from google.cloud.spanner_v1.proto import ( - mutation_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_mutation__pb2, -) -from google.cloud.spanner_v1.proto import ( - result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.cloud.spanner_v1.proto import ( - type_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/spanner.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\014SpannerProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1\352A_\n\037spanner.googleapis.com/Database\022\n\x11partition_options\x18\x06 \x01(\x0b\x32#.google.spanner.v1.PartitionOptions\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"\xb1\x02\n\x14PartitionReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x05 \x03(\t\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12>\n\x11partition_options\x18\t \x01(\x0b\x32#.google.spanner.v1.PartitionOptions"$\n\tPartition\x12\x17\n\x0fpartition_token\x18\x01 \x01(\x0c"z\n\x11PartitionResponse\x12\x30\n\npartitions\x18\x01 \x03(\x0b\x32\x1c.google.spanner.v1.Partition\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xab\x02\n\x0bReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x14\n\x07\x63olumns\x18\x05 \x03(\tB\x03\xe0\x41\x02\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12\r\n\x05limit\x18\x08 \x01(\x03\x12\x14\n\x0cresume_token\x18\t 
\x01(\x0c\x12\x17\n\x0fpartition_token\x18\n \x01(\x0c"\x8f\x01\n\x17\x42\x65ginTransactionRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x07options\x18\x02 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsB\x03\xe0\x41\x02"\xea\x01\n\rCommitRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x18\n\x0etransaction_id\x18\x02 \x01(\x0cH\x00\x12G\n\x16single_use_transaction\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12.\n\tmutations\x18\x04 \x03(\x0b\x32\x1b.google.spanner.v1.MutationB\r\n\x0btransaction"F\n\x0e\x43ommitResponse\x12\x34\n\x10\x63ommit_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"g\n\x0fRollbackRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x1b\n\x0etransaction_id\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x32\xc0\x16\n\x07Spanner\x12\xa6\x01\n\rCreateSession\x12\'.google.spanner.v1.CreateSessionRequest\x1a\x1a.google.spanner.v1.Session"P\x82\xd3\xe4\x93\x02?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\x01*\xda\x41\x08\x64\x61tabase\x12\xe0\x01\n\x13\x42\x61tchCreateSessions\x12-.google.spanner.v1.BatchCreateSessionsRequest\x1a..google.spanner.v1.BatchCreateSessionsResponse"j\x82\xd3\xe4\x93\x02K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\x01*\xda\x41\x16\x64\x61tabase,session_count\x12\x97\x01\n\nGetSession\x12$.google.spanner.v1.GetSessionRequest\x1a\x1a.google.spanner.v1.Session"G\x82\xd3\xe4\x93\x02:\x12\x38/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xae\x01\n\x0cListSessions\x12&.google.spanner.v1.ListSessionsRequest\x1a\'.google.spanner.v1.ListSessionsResponse"M\x82\xd3\xe4\x93\x02<\x12:/v1/{database=projects/*/instances/*/databases/*}/sessions\xda\x41\x08\x64\x61tabase\x12\x99\x01\n\rDeleteSession\x12\'.google.spanner.v1.DeleteSessionRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xa3\x01\n\nExecuteSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a\x1c.google.spanner.v1.ResultSet"Q\x82\xd3\xe4\x93\x02K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\x01*\x12\xbe\x01\n\x13\x45xecuteStreamingSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a#.google.spanner.v1.PartialResultSet"Z\x82\xd3\xe4\x93\x02T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\x01*0\x01\x12\xc0\x01\n\x0f\x45xecuteBatchDml\x12).google.spanner.v1.ExecuteBatchDmlRequest\x1a*.google.spanner.v1.ExecuteBatchDmlResponse"V\x82\xd3\xe4\x93\x02P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\x01*\x12\x91\x01\n\x04Read\x12\x1e.google.spanner.v1.ReadRequest\x1a\x1c.google.spanner.v1.ResultSet"K\x82\xd3\xe4\x93\x02\x45"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\x01*\x12\xac\x01\n\rStreamingRead\x12\x1e.google.spanner.v1.ReadRequest\x1a#.google.spanner.v1.PartialResultSet"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\x01*0\x01\x12\xc9\x01\n\x10\x42\x65ginTransaction\x12*.google.spanner.v1.BeginTransactionRequest\x1a\x1e.google.spanner.v1.Transaction"i\x82\xd3\xe4\x93\x02Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\x01*\xda\x41\x0fsession,options\x12\xeb\x01\n\x06\x43ommit\x12 
.google.spanner.v1.CommitRequest\x1a!.google.spanner.v1.CommitResponse"\x9b\x01\x82\xd3\xe4\x93\x02G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\x01*\xda\x41 session,transaction_id,mutations\xda\x41(session,single_use_transaction,mutations\x12\xb0\x01\n\x08Rollback\x12".google.spanner.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"h\x82\xd3\xe4\x93\x02I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\x01*\xda\x41\x16session,transaction_id\x12\xb7\x01\n\x0ePartitionQuery\x12(.google.spanner.v1.PartitionQueryRequest\x1a$.google.spanner.v1.PartitionResponse"U\x82\xd3\xe4\x93\x02O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\x01*\x12\xb4\x01\n\rPartitionRead\x12\'.google.spanner.v1.PartitionReadRequest\x1a$.google.spanner.v1.PartitionResponse"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\x01*\x1aw\xca\x41\x16spanner.googleapis.com\xd2\x41[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.dataB\x94\x02\n\x15\x63om.google.spanner.v1B\x0cSpannerProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1\xea\x41_\n\x1fspanner.googleapis.com/Database\x12 The - session has the label “env”. - ``labels.env:dev`` –> The - session has the label “env” and the value of the label - contains the string “dev”. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsRequest) - }, -) -_sym_db.RegisterMessage(ListSessionsRequest) - -ListSessionsResponse = _reflection.GeneratedProtocolMessageType( - "ListSessionsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSESSIONSRESPONSE, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The response for - [ListSessions][google.spanner.v1.Spanner.ListSessions]. - - Attributes: - sessions: - The list of requested sessions. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent - [ListSessions][google.spanner.v1.Spanner.ListSessions] call to - fetch more of the matching sessions. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsResponse) - }, -) -_sym_db.RegisterMessage(ListSessionsResponse) - -DeleteSessionRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSessionRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESESSIONREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for - [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. - - Attributes: - name: - Required. The name of the session to delete. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.DeleteSessionRequest) - }, -) -_sym_db.RegisterMessage(DeleteSessionRequest) - -ExecuteSqlRequest = _reflection.GeneratedProtocolMessageType( - "ExecuteSqlRequest", - (_message.Message,), - { - "QueryOptions": _reflection.GeneratedProtocolMessageType( - "QueryOptions", - (_message.Message,), - { - "DESCRIPTOR": _EXECUTESQLREQUEST_QUERYOPTIONS, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """Query optimizer configuration. - - Attributes: - optimizer_version: - An option to control the selection of optimizer version. This - parameter allows individual queries to pick different query - optimizer versions. Specifying “latest” as a value instructs - Cloud Spanner to use the latest supported query optimizer - version. 
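# A minimal sketch (not part of this patch) of the label filtering described
# above: with the handwritten client, labels are attached at session-creation
# time, and ListSessions can then filter on them. Instance/database IDs and
# the "env" label are placeholders.
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

# Database.session() builds a session object; the labels ride along on the
# underlying CreateSession request when create() is called.
session = database.session(labels={"env": "dev"})
session.create()
print(session.exists())
session.delete()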
If not specified, Cloud Spanner uses optimizer - version set at the database level options. Any other positive - integer (from the list of supported optimizer versions) - overrides the default optimizer version for query execution. - The list of supported optimizer versions can be queried from - SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL - statement with an invalid optimizer version will fail with a - syntax error (``INVALID_ARGUMENT``) status. The - ``optimizer_version`` statement hint has precedence over this - setting. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.QueryOptions) - }, - ), - "ParamTypesEntry": _reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - { - "DESCRIPTOR": _EXECUTESQLREQUEST_PARAMTYPESENTRY, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry) - }, - ), - "DESCRIPTOR": _EXECUTESQLREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and - [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. - - Attributes: - session: - Required. The session in which the SQL query should be - performed. - transaction: - The transaction to use. For queries, if none is provided, the - default is a temporary read-only transaction with strong - concurrency. Standard DML statements require a read-write - transaction. To protect against replays, single-use - transactions are not supported. The caller must either supply - an existing transaction ID or begin a new transaction. - Partitioned DML requires an existing Partitioned DML - transaction ID. - sql: - Required. The SQL string. - params: - Parameter names and values that bind to placeholders in the - SQL string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in - [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON - strings. In these cases, ``param_types`` can be used to - specify the exact SQL type for some or all of the SQL - statement parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - resume_token: - If this request is resuming a previously interrupted SQL - statement execution, ``resume_token`` should be copied from - the last - [PartialResultSet][google.spanner.v1.PartialResultSet] yielded - before the interruption. Doing this enables the new SQL - statement execution to resume where the last one left off. The - rest of the request parameters must exactly match the request - that yielded this token. - query_mode: - Used to control the amount of debugging information returned - in [ResultSetStats][google.spanner.v1.ResultSetStats]. 
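# Sketch of the optimizer_version option described above, assuming the
# handwritten client's execute_sql(), which accepts QueryOptions as a plain
# dict. Resource IDs are placeholders.
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

with database.snapshot() as snapshot:
    rows = snapshot.execute_sql(
        "SELECT 1",
        # "latest" selects the newest supported optimizer; omit the option to
        # fall back to the database-level default. As noted above, an
        # optimizer_version statement hint still takes precedence.
        query_options={"optimizer_version": "latest"},
    )
    for row in rows:
        print(row)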
If [par - tition_token][google.spanner.v1.ExecuteSqlRequest.partition_to - ken] is set, - [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] - can only be set to [QueryMode.NORMAL][google.spanner.v1.Execut - eSqlRequest.QueryMode.NORMAL]. - partition_token: - If present, results will be restricted to the specified - partition previously created using PartitionQuery(). There - must be an exact match for the values of fields common to this - message and the PartitionQueryRequest message used to create - this partition_token. - seqno: - A per-transaction sequence number used to identify this - request. This field makes each request idempotent such that if - the request is received multiple times, at most one will - succeed. The sequence number must be monotonically increasing - within the transaction. If a request arrives for the first - time with an out-of-order sequence number, the transaction may - be aborted. Replays of previously handled requests will yield - the same response as the first execution. Required for DML - statements. Ignored for queries. - query_options: - Query optimizer configuration to use for the given query. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest) - }, -) -_sym_db.RegisterMessage(ExecuteSqlRequest) -_sym_db.RegisterMessage(ExecuteSqlRequest.QueryOptions) -_sym_db.RegisterMessage(ExecuteSqlRequest.ParamTypesEntry) - -ExecuteBatchDmlRequest = _reflection.GeneratedProtocolMessageType( - "ExecuteBatchDmlRequest", - (_message.Message,), - { - "Statement": _reflection.GeneratedProtocolMessageType( - "Statement", - (_message.Message,), - { - "ParamTypesEntry": _reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - { - "DESCRIPTOR": _EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry) - }, - ), - "DESCRIPTOR": _EXECUTEBATCHDMLREQUEST_STATEMENT, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """A single DML statement. - - Attributes: - sql: - Required. The DML string. - params: - Parameter names and values that bind to placeholders in the - DML string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in [params - ][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] - as JSON strings. In these cases, ``param_types`` can be used - to specify the exact SQL type for some or all of the SQL - statement parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement) - }, - ), - "DESCRIPTOR": _EXECUTEBATCHDMLREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for - [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. 
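# Sketch of binding the @-placeholders described above. Because BYTES and
# STRING values both arrive as JSON strings, param_types pins the SQL type
# explicitly. Table and column names are placeholders.
from google.cloud import spanner
from google.cloud.spanner_v1 import param_types

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

with database.snapshot() as snapshot:
    rows = snapshot.execute_sql(
        "SELECT SingerId, FirstName FROM Singers WHERE FirstName = @name",
        params={"name": "Alice"},
        param_types={"name": param_types.STRING},
    )
    for singer_id, first_name in rows:
        print(singer_id, first_name)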
- - Attributes: - session: - Required. The session in which the DML statements should be - performed. - transaction: - Required. The transaction to use. Must be a read-write - transaction. To protect against replays, single-use - transactions are not supported. The caller must either supply - an existing transaction ID or begin a new transaction. - statements: - Required. The list of statements to execute in this batch. - Statements are executed serially, such that the effects of - statement ``i`` are visible to statement ``i+1``. Each - statement must be a DML statement. Execution stops at the - first failed statement; the remaining statements are not - executed. Callers must provide at least one statement. - seqno: - Required. A per-transaction sequence number used to identify - this request. This field makes each request idempotent such - that if the request is received multiple times, at most one - will succeed. The sequence number must be monotonically - increasing within the transaction. If a request arrives for - the first time with an out-of-order sequence number, the - transaction may be aborted. Replays of previously handled - requests will yield the same response as the first execution. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest) - }, -) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement.ParamTypesEntry) - -ExecuteBatchDmlResponse = _reflection.GeneratedProtocolMessageType( - "ExecuteBatchDmlResponse", - (_message.Message,), - { - "DESCRIPTOR": _EXECUTEBATCHDMLRESPONSE, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The response for - [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains - a list of [ResultSet][google.spanner.v1.ResultSet] messages, one for - each DML statement that has successfully executed, in the same order - as the statements in the request. If a statement fails, the status in - the response body identifies the cause of the failure. To check for - DML statements that failed, use the following approach: 1. Check the - status in the response message. The - [google.rpc.Code][google.rpc.Code] enum value ``OK`` indicates that - all statements were executed successfully. 2. If the status was not - ``OK``, check the number of result sets in the response. If the - response contains ``N`` [ResultSet][google.spanner.v1.ResultSet] - messages, then statement ``N+1`` in the request failed. Example 1: - - Request: 5 DML statements, all executed successfully. - Response: - 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the - status ``OK``. Example 2: - Request: 5 DML statements. The third - statement has a syntax error. - Response: 2 - [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax - error (``INVALID_ARGUMENT``) status. The number of - [ResultSet][google.spanner.v1.ResultSet] messages indicates that the - third statement failed, and the fourth and fifth statements were not - executed. - - Attributes: - result_sets: - One [ResultSet][google.spanner.v1.ResultSet] for each - statement in the request that ran successfully, in the same - order as the statements in the request. Each - [ResultSet][google.spanner.v1.ResultSet] does not contain any - rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] - in each [ResultSet][google.spanner.v1.ResultSet] contain the - number of rows modified by the statement. 
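# Sketch of the handwritten wrapper for ExecuteBatchDml,
# Transaction.batch_update(); it fills in the per-transaction seqno described
# above automatically. Statements may be plain SQL strings or
# (sql, params, param_types) tuples. Table names are placeholders.
from google.cloud import spanner
from google.cloud.spanner_v1 import param_types
from google.rpc import code_pb2

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

def update_singers(transaction):
    status, row_counts = transaction.batch_update(
        [
            "UPDATE Singers SET LastName = 'Smith' WHERE SingerId = 1",
            (
                "DELETE FROM Singers WHERE SingerId = @id",
                {"id": 2},
                {"id": param_types.INT64},
            ),
        ]
    )
    # Statements run serially and execution stops at the first failure, so a
    # non-OK status plus N result counts means statement N + 1 failed.
    if status.code != code_pb2.OK:
        raise RuntimeError(status.message)
    print(row_counts)

database.run_in_transaction(update_singers)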
Only the first - [ResultSet][google.spanner.v1.ResultSet] in the response - contains valid - [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. - status: - If all DML statements are executed successfully, the status is - ``OK``. Otherwise, the error status of the first failed - statement. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlResponse) - }, -) -_sym_db.RegisterMessage(ExecuteBatchDmlResponse) - -PartitionOptions = _reflection.GeneratedProtocolMessageType( - "PartitionOptions", - (_message.Message,), - { - "DESCRIPTOR": _PARTITIONOPTIONS, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """Options for a PartitionQueryRequest and PartitionReadRequest. - - Attributes: - partition_size_bytes: - \ **Note:** This hint is currently ignored by PartitionQuery - and PartitionRead requests. The desired data size for each - partition generated. The default for this option is currently - 1 GiB. This is only a hint. The actual size of each partition - may be smaller or larger than this size request. - max_partitions: - \ **Note:** This hint is currently ignored by PartitionQuery - and PartitionRead requests. The desired maximum number of - partitions to return. For example, this may be set to the - number of workers available. The default for this option is - currently 10,000. The maximum value is currently 200,000. This - is only a hint. The actual number of partitions returned may - be smaller or larger than this maximum count request. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionOptions) - }, -) -_sym_db.RegisterMessage(PartitionOptions) - -PartitionQueryRequest = _reflection.GeneratedProtocolMessageType( - "PartitionQueryRequest", - (_message.Message,), - { - "ParamTypesEntry": _reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PARTITIONQUERYREQUEST_PARAMTYPESENTRY, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest.ParamTypesEntry) - }, - ), - "DESCRIPTOR": _PARTITIONQUERYREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for - [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] - - Attributes: - session: - Required. The session used to create the partitions. - transaction: - Read only snapshot transactions are supported, read/write and - single use transactions are not. - sql: - Required. The query request to generate partitions for. The - request will fail if the query is not root partitionable. The - query plan of a root partitionable query has a single - distributed union operator. A distributed union operator - conceptually divides one or more tables into multiple splits, - remotely evaluates a subquery independently on each split, and - then unions all results. This must not contain DML commands, - such as INSERT, UPDATE, or DELETE. Use [ExecuteStreamingSql][g - oogle.spanner.v1.Spanner.ExecuteStreamingSql] with a - PartitionedDml transaction for large, partition-friendly DML - operations. - params: - Parameter names and values that bind to placeholders in the - SQL string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. 
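# Sketch: the two PartitionOptions hints above surface as keyword arguments
# on BatchSnapshot.generate_query_batches(). Both are hints only and may be
# ignored by the service. Resource IDs and the SQL are placeholders.
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

snapshot = database.batch_snapshot()
batches = snapshot.generate_query_batches(
    "SELECT SingerId FROM Singers",
    partition_size_bytes=1 << 30,  # desired ~1 GiB per partition (hint)
    max_partitions=8,              # e.g. one partition per worker (hint)
)
for batch in batches:
    for row in snapshot.process_query_batch(batch):
        print(row)
snapshot.close()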
The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in - [params][google.spanner.v1.PartitionQueryRequest.params] as - JSON strings. In these cases, ``param_types`` can be used to - specify the exact SQL type for some or all of the SQL query - parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - partition_options: - Additional options that affect how many partitions are - created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest) - }, -) -_sym_db.RegisterMessage(PartitionQueryRequest) -_sym_db.RegisterMessage(PartitionQueryRequest.ParamTypesEntry) - -PartitionReadRequest = _reflection.GeneratedProtocolMessageType( - "PartitionReadRequest", - (_message.Message,), - { - "DESCRIPTOR": _PARTITIONREADREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for - [PartitionRead][google.spanner.v1.Spanner.PartitionRead] - - Attributes: - session: - Required. The session used to create the partitions. - transaction: - Read only snapshot transactions are supported, read/write and - single use transactions are not. - table: - Required. The name of the table in the database to be read. - index: - If non-empty, the name of an index on - [table][google.spanner.v1.PartitionReadRequest.table]. This - index is used instead of the table primary key when - interpreting - [key_set][google.spanner.v1.PartitionReadRequest.key_set] and - sorting result rows. See - [key_set][google.spanner.v1.PartitionReadRequest.key_set] for - further information. - columns: - The columns of - [table][google.spanner.v1.PartitionReadRequest.table] to be - returned for each row matching this request. - key_set: - Required. ``key_set`` identifies the rows to be yielded. - ``key_set`` names the primary keys of the rows in - [table][google.spanner.v1.PartitionReadRequest.table] to be - yielded, unless - [index][google.spanner.v1.PartitionReadRequest.index] is - present. If - [index][google.spanner.v1.PartitionReadRequest.index] is - present, then - [key_set][google.spanner.v1.PartitionReadRequest.key_set] - instead names index keys in - [index][google.spanner.v1.PartitionReadRequest.index]. It is - not an error for the ``key_set`` to name rows that do not - exist in the database. Read yields nothing for nonexistent - rows. - partition_options: - Additional options that affect how many partitions are - created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionReadRequest) - }, -) -_sym_db.RegisterMessage(PartitionReadRequest) - -Partition = _reflection.GeneratedProtocolMessageType( - "Partition", - (_message.Message,), - { - "DESCRIPTOR": _PARTITION, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """Information returned for each partition returned in a - PartitionResponse. - - Attributes: - partition_token: - This token can be passed to Read, StreamingRead, ExecuteSql, - or ExecuteStreamingSql requests to restrict the results to - those identified by this partition token. 
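# Sketch of the PartitionRead wrapper, mirroring the request fields above
# (table, columns, key_set, optional index). As documented, naming rows that
# do not exist is not an error. Names are placeholders.
from google.cloud import spanner
from google.cloud.spanner_v1.keyset import KeySet

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

snapshot = database.batch_snapshot()
batches = snapshot.generate_read_batches(
    table="Singers",
    columns=("SingerId", "FirstName"),
    keyset=KeySet(all_=True),
)
for batch in batches:
    for row in snapshot.process_read_batch(batch):
        print(row)
snapshot.close()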
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Partition) - }, -) -_sym_db.RegisterMessage(Partition) - -PartitionResponse = _reflection.GeneratedProtocolMessageType( - "PartitionResponse", - (_message.Message,), - { - "DESCRIPTOR": _PARTITIONRESPONSE, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The response for - [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or - [PartitionRead][google.spanner.v1.Spanner.PartitionRead] - - Attributes: - partitions: - Partitions created by this request. - transaction: - Transaction created by this request. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionResponse) - }, -) -_sym_db.RegisterMessage(PartitionResponse) - -ReadRequest = _reflection.GeneratedProtocolMessageType( - "ReadRequest", - (_message.Message,), - { - "DESCRIPTOR": _READREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for [Read][google.spanner.v1.Spanner.Read] and - [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. - - Attributes: - session: - Required. The session in which the read should be performed. - transaction: - The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - table: - Required. The name of the table in the database to be read. - index: - If non-empty, the name of an index on - [table][google.spanner.v1.ReadRequest.table]. This index is - used instead of the table primary key when interpreting - [key\_set][google.spanner.v1.ReadRequest.key\_set] and sorting - result rows. See - [key\_set][google.spanner.v1.ReadRequest.key\_set] for further - information. - columns: - Required. The columns of - [table][google.spanner.v1.ReadRequest.table] to be returned - for each row matching this request. - key_set: - Required. ``key_set`` identifies the rows to be yielded. - ``key_set`` names the primary keys of the rows in - [table][google.spanner.v1.ReadRequest.table] to be yielded, - unless [index][google.spanner.v1.ReadRequest.index] is - present. If [index][google.spanner.v1.ReadRequest.index] is - present, then - [key\_set][google.spanner.v1.ReadRequest.key\_set] instead - names index keys in - [index][google.spanner.v1.ReadRequest.index]. If the [partiti - on\_token][google.spanner.v1.ReadRequest.partition\_token] - field is empty, rows are yielded in table primary key order - (if [index][google.spanner.v1.ReadRequest.index] is empty) or - index key order (if - [index][google.spanner.v1.ReadRequest.index] is non-empty). If - the [partition\_token][google.spanner.v1.ReadRequest.partition - \_token] field is not empty, rows will be yielded in an - unspecified order. It is not an error for the ``key_set`` to - name rows that do not exist in the database. Read yields - nothing for nonexistent rows. - limit: - If greater than zero, only the first ``limit`` rows are - yielded. If ``limit`` is zero, the default is no limit. A - limit cannot be specified if ``partition_token`` is set. - resume_token: - If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last - [PartialResultSet][google.spanner.v1.PartialResultSet] yielded - before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request - parameters must exactly match the request that yielded this - token. 
- partition_token: - If present, results will be restricted to the specified - partition previously created using PartitionRead(). There must - be an exact match for the values of fields common to this - message and the PartitionReadRequest message used to create - this partition_token. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ReadRequest) - }, -) -_sym_db.RegisterMessage(ReadRequest) - -BeginTransactionRequest = _reflection.GeneratedProtocolMessageType( - "BeginTransactionRequest", - (_message.Message,), - { - "DESCRIPTOR": _BEGINTRANSACTIONREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for - [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. - - Attributes: - session: - Required. The session in which the transaction runs. - options: - Required. Options for the new transaction. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.BeginTransactionRequest) - }, -) -_sym_db.RegisterMessage(BeginTransactionRequest) - -CommitRequest = _reflection.GeneratedProtocolMessageType( - "CommitRequest", - (_message.Message,), - { - "DESCRIPTOR": _COMMITREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for [Commit][google.spanner.v1.Spanner.Commit]. - - Attributes: - session: - Required. The session in which the transaction to be committed - is running. - transaction: - Required. The transaction in which to commit. - transaction_id: - Commit a previously-started transaction. - single_use_transaction: - Execute mutations in a temporary transaction. Note that unlike - commit of a previously-started transaction, commit with a - temporary transaction is non-idempotent. That is, if the - ``CommitRequest`` is sent to Cloud Spanner more than once (for - instance, due to retries in the application, or in the - transport library), it is possible that the mutations are - executed more than once. If this is undesirable, use - [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] - and [Commit][google.spanner.v1.Spanner.Commit] instead. - mutations: - The mutations to be executed when this transaction commits. - All mutations are applied atomically, in the order they appear - in this list. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.CommitRequest) - }, -) -_sym_db.RegisterMessage(CommitRequest) - -CommitResponse = _reflection.GeneratedProtocolMessageType( - "CommitResponse", - (_message.Message,), - { - "DESCRIPTOR": _COMMITRESPONSE, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The response for [Commit][google.spanner.v1.Spanner.Commit]. - - Attributes: - commit_timestamp: - The Cloud Spanner timestamp at which the transaction - committed. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.CommitResponse) - }, -) -_sym_db.RegisterMessage(CommitResponse) - -RollbackRequest = _reflection.GeneratedProtocolMessageType( - "RollbackRequest", - (_message.Message,), - { - "DESCRIPTOR": _ROLLBACKREQUEST, - "__module__": "google.cloud.spanner_v1.proto.spanner_pb2", - "__doc__": """The request for [Rollback][google.spanner.v1.Spanner.Rollback]. - - Attributes: - session: - Required. The session in which the transaction to roll back is - running. - transaction_id: - Required. The transaction to roll back. 
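# Sketch of the handwritten wrapper for Read/StreamingRead. With no
# partition_token (as here), rows come back in primary-key order, or in
# index-key order when an index is named. Names are placeholders.
from google.cloud import spanner
from google.cloud.spanner_v1.keyset import KeySet

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

with database.snapshot() as snapshot:
    rows = snapshot.read(
        table="Singers",
        columns=("SingerId", "FirstName"),
        keyset=KeySet(keys=[[1], [2], [999]]),  # missing keys yield nothing
        limit=10,  # only the first 10 matching rows
    )
    for row in rows:
        print(row)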
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.RollbackRequest) - }, -) -_sym_db.RegisterMessage(RollbackRequest) - - -DESCRIPTOR._options = None -_CREATESESSIONREQUEST.fields_by_name["database"]._options = None -_BATCHCREATESESSIONSREQUEST.fields_by_name["database"]._options = None -_BATCHCREATESESSIONSREQUEST.fields_by_name["session_count"]._options = None -_SESSION_LABELSENTRY._options = None -_SESSION._options = None -_GETSESSIONREQUEST.fields_by_name["name"]._options = None -_LISTSESSIONSREQUEST.fields_by_name["database"]._options = None -_DELETESESSIONREQUEST.fields_by_name["name"]._options = None -_EXECUTESQLREQUEST_PARAMTYPESENTRY._options = None -_EXECUTESQLREQUEST.fields_by_name["session"]._options = None -_EXECUTESQLREQUEST.fields_by_name["sql"]._options = None -_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["session"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["transaction"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["statements"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["seqno"]._options = None -_PARTITIONQUERYREQUEST_PARAMTYPESENTRY._options = None -_PARTITIONQUERYREQUEST.fields_by_name["session"]._options = None -_PARTITIONQUERYREQUEST.fields_by_name["sql"]._options = None -_PARTITIONREADREQUEST.fields_by_name["session"]._options = None -_PARTITIONREADREQUEST.fields_by_name["table"]._options = None -_PARTITIONREADREQUEST.fields_by_name["key_set"]._options = None -_READREQUEST.fields_by_name["session"]._options = None -_READREQUEST.fields_by_name["table"]._options = None -_READREQUEST.fields_by_name["columns"]._options = None -_READREQUEST.fields_by_name["key_set"]._options = None -_BEGINTRANSACTIONREQUEST.fields_by_name["session"]._options = None -_BEGINTRANSACTIONREQUEST.fields_by_name["options"]._options = None -_COMMITREQUEST.fields_by_name["session"]._options = None -_ROLLBACKREQUEST.fields_by_name["session"]._options = None -_ROLLBACKREQUEST.fields_by_name["transaction_id"]._options = None - -_SPANNER = _descriptor.ServiceDescriptor( - name="Spanner", - full_name="google.spanner.v1.Spanner", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\026spanner.googleapis.com\322A[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.data", - create_key=_descriptor._internal_create_key, - serialized_start=4716, - serialized_end=7596, - methods=[ - _descriptor.MethodDescriptor( - name="CreateSession", - full_name="google.spanner.v1.Spanner.CreateSession", - index=0, - containing_service=None, - input_type=_CREATESESSIONREQUEST, - output_type=_SESSION, - serialized_options=b'\202\323\344\223\002?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\001*\332A\010database', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="BatchCreateSessions", - full_name="google.spanner.v1.Spanner.BatchCreateSessions", - index=1, - containing_service=None, - input_type=_BATCHCREATESESSIONSREQUEST, - output_type=_BATCHCREATESESSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\001*\332A\026database,session_count', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetSession", - full_name="google.spanner.v1.Spanner.GetSession", - index=2, - containing_service=None, - input_type=_GETSESSIONREQUEST, - output_type=_SESSION, - 
serialized_options=b"\202\323\344\223\002:\0228/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListSessions", - full_name="google.spanner.v1.Spanner.ListSessions", - index=3, - containing_service=None, - input_type=_LISTSESSIONSREQUEST, - output_type=_LISTSESSIONSRESPONSE, - serialized_options=b"\202\323\344\223\002<\022:/v1/{database=projects/*/instances/*/databases/*}/sessions\332A\010database", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteSession", - full_name="google.spanner.v1.Spanner.DeleteSession", - index=4, - containing_service=None, - input_type=_DELETESESSIONREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExecuteSql", - full_name="google.spanner.v1.Spanner.ExecuteSql", - index=5, - containing_service=None, - input_type=_EXECUTESQLREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET, - serialized_options=b'\202\323\344\223\002K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExecuteStreamingSql", - full_name="google.spanner.v1.Spanner.ExecuteStreamingSql", - index=6, - containing_service=None, - input_type=_EXECUTESQLREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET, - serialized_options=b'\202\323\344\223\002T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ExecuteBatchDml", - full_name="google.spanner.v1.Spanner.ExecuteBatchDml", - index=7, - containing_service=None, - input_type=_EXECUTEBATCHDMLREQUEST, - output_type=_EXECUTEBATCHDMLRESPONSE, - serialized_options=b'\202\323\344\223\002P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="Read", - full_name="google.spanner.v1.Spanner.Read", - index=8, - containing_service=None, - input_type=_READREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET, - serialized_options=b'\202\323\344\223\002E"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="StreamingRead", - full_name="google.spanner.v1.Spanner.StreamingRead", - index=9, - containing_service=None, - input_type=_READREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET, - serialized_options=b'\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="BeginTransaction", - full_name="google.spanner.v1.Spanner.BeginTransaction", - index=10, - containing_service=None, - input_type=_BEGINTRANSACTIONREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION, - 
serialized_options=b'\202\323\344\223\002Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\001*\332A\017session,options', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="Commit", - full_name="google.spanner.v1.Spanner.Commit", - index=11, - containing_service=None, - input_type=_COMMITREQUEST, - output_type=_COMMITRESPONSE, - serialized_options=b'\202\323\344\223\002G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\001*\332A session,transaction_id,mutations\332A(session,single_use_transaction,mutations', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="Rollback", - full_name="google.spanner.v1.Spanner.Rollback", - index=12, - containing_service=None, - input_type=_ROLLBACKREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\001*\332A\026session,transaction_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="PartitionQuery", - full_name="google.spanner.v1.Spanner.PartitionQuery", - index=13, - containing_service=None, - input_type=_PARTITIONQUERYREQUEST, - output_type=_PARTITIONRESPONSE, - serialized_options=b'\202\323\344\223\002O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="PartitionRead", - full_name="google.spanner.v1.Spanner.PartitionRead", - index=14, - containing_service=None, - input_type=_PARTITIONREADREQUEST, - output_type=_PARTITIONRESPONSE, - serialized_options=b'\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\001*', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_SPANNER) - -DESCRIPTOR.services_by_name["Spanner"] = _SPANNER - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py b/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py deleted file mode 100644 index f7591434a9..0000000000 --- a/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py +++ /dev/null @@ -1,819 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.spanner_v1.proto import ( - result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2, -) -from google.cloud.spanner_v1.proto import ( - spanner_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class SpannerStub(object): - """Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateSession = channel.unary_unary( - "/google.spanner.v1.Spanner/CreateSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - ) - self.BatchCreateSessions = channel.unary_unary( - "/google.spanner.v1.Spanner/BatchCreateSessions", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString, - ) - self.GetSession = channel.unary_unary( - "/google.spanner.v1.Spanner/GetSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - ) - self.ListSessions = channel.unary_unary( - "/google.spanner.v1.Spanner/ListSessions", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString, - ) - self.DeleteSession = channel.unary_unary( - "/google.spanner.v1.Spanner/DeleteSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ExecuteSql = channel.unary_unary( - "/google.spanner.v1.Spanner/ExecuteSql", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - ) - self.ExecuteStreamingSql = channel.unary_stream( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - ) - self.ExecuteBatchDml = channel.unary_unary( - "/google.spanner.v1.Spanner/ExecuteBatchDml", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString, - ) - self.Read = channel.unary_unary( - "/google.spanner.v1.Spanner/Read", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - ) - self.StreamingRead = channel.unary_stream( - "/google.spanner.v1.Spanner/StreamingRead", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - ) - self.BeginTransaction = channel.unary_unary( - "/google.spanner.v1.Spanner/BeginTransaction", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString, - ) - self.Commit = channel.unary_unary( - "/google.spanner.v1.Spanner/Commit", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString, - ) - self.Rollback = channel.unary_unary( - "/google.spanner.v1.Spanner/Rollback", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.PartitionQuery = channel.unary_unary( - "/google.spanner.v1.Spanner/PartitionQuery", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - ) - self.PartitionRead = channel.unary_unary( - "/google.spanner.v1.Spanner/PartitionRead", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - ) - - -class SpannerServicer(object): - """Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - def CreateSession(self, request, context): - """Creates a new session. A session can be used to perform - transactions that read and/or modify data in a Cloud Spanner database. - Sessions are meant to be reused for many consecutive - transactions. - - Sessions can only execute one transaction at a time. To execute - multiple concurrent read-write/write-only transactions, create - multiple sessions. Note that standalone reads and queries use a - transaction internally, and count toward the one transaction - limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. - Aside from explicit deletes, Cloud Spanner may delete sessions for which no - operations are sent for more than an hour. If a session is deleted, - requests to it return `NOT_FOUND`. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., `"SELECT 1"`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BatchCreateSessions(self, request, context): - """Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSession(self, request, context): - """Gets a session. Returns `NOT_FOUND` if the session does not exist. - This is mainly useful for determining whether a session is still - alive. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSessions(self, request, context): - """Lists all sessions in a given database. 
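# Sketch of the pre-2.0 pattern this diff removes: hand-wiring SpannerStub
# over a gRPC channel (shown here against the emulator's default port, which
# needs no credentials). After the migration, the generated client under
# google.cloud.spanner_v1.services replaces this module.
import grpc
from google.cloud.spanner_v1.proto import spanner_pb2, spanner_pb2_grpc

channel = grpc.insecure_channel("localhost:9010")  # e.g. the Spanner emulator
stub = spanner_pb2_grpc.SpannerStub(channel)
session = stub.CreateSession(
    spanner_pb2.CreateSessionRequest(
        database="projects/my-project/instances/my-instance/databases/my-database"
    )
)
print(session.name)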
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSession(self, request, context): - """Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteSql(self, request, context): - """Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; - if the query yields more data than that, the query fails with - a `FAILED_PRECONDITION` error. - - Operations inside read-write transactions might return `ABORTED`. If - this occurs, the application should restart the transaction from - the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. - - Larger result sets can be fetched in streaming fashion by calling - [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteStreamingSql(self, request, context): - """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result - set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there - is no limit on the size of the returned result set. However, no - individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteBatchDml(self, request, context): - """Executes a batch of SQL DML statements. This method allows many statements - to be run with lower latency than submitting them sequentially with - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. - - Statements are executed in sequential order. A request can succeed even if - a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the - response provides information about the statement that failed. Clients must - inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining statements - are not executed. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Read(self, request, context): - """Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to - return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a `FAILED_PRECONDITION` - error. - - Reads inside read-write transactions might return `ABORTED`. If - this occurs, the application should restart the transaction from - the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. - - Larger result sets can be yielded in streaming fashion by calling - [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def StreamingRead(self, request, context): - """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a - stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the - size of the returned result set. However, no individual row in - the result set can exceed 100 MiB, and no column value can exceed - 10 MiB. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BeginTransaction(self, request, context): - """Begins a new transaction. This step can often be skipped: - [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and - [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a - side-effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Commit(self, request, context): - """Commits a transaction. The request includes the mutations to be - applied to rows in the database. - - `Commit` might return an `ABORTED` error. This can occur at any time; - commonly, the cause is conflicts with concurrent - transactions. However, it can also happen for a variety of other - reasons. If `Commit` returns `ABORTED`, the caller should re-attempt - the transaction from the beginning, re-using the same session. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Rollback(self, request, context): - """Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more - [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and - ultimately decides not to commit. - - `Rollback` returns `OK` if it successfully aborts the transaction, the - transaction was already aborted, or the transaction is not - found. `Rollback` never returns `ABORTED`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartitionQuery(self, request, context): - """Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset - of the query result to read. The same session and read-only transaction - must be used by the PartitionQueryRequest used to create the - partition tokens and the ExecuteSqlRequests that use the partition tokens. - - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, and - the whole operation must be restarted from the beginning. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartitionRead(self, request, context): - """Creates a set of partition tokens that can be used to execute a read - operation in parallel. 
Each of the returned partition tokens can be used - by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read - result to read. The same session and read-only transaction must be used by - the PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering - guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a partition_token. - - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, and - the whole operation must be restarted from the beginning. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_SpannerServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateSession": grpc.unary_unary_rpc_method_handler( - servicer.CreateSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString, - ), - "BatchCreateSessions": grpc.unary_unary_rpc_method_handler( - servicer.BatchCreateSessions, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.SerializeToString, - ), - "GetSession": grpc.unary_unary_rpc_method_handler( - servicer.GetSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString, - ), - "ListSessions": grpc.unary_unary_rpc_method_handler( - servicer.ListSessions, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.SerializeToString, - ), - "DeleteSession": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ExecuteSql": grpc.unary_unary_rpc_method_handler( - servicer.ExecuteSql, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString, - ), - "ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler( - servicer.ExecuteStreamingSql, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString, - ), - "ExecuteBatchDml": grpc.unary_unary_rpc_method_handler( - servicer.ExecuteBatchDml, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.FromString, - 
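# Sketch: partition tokens can cross process boundaries. BatchSnapshot
# serializes to a plain dict that a worker rehydrates with from_dict(); as
# the docstring above warns, the tokens die with the creating session, so
# keep it alive until every batch is processed. Names are placeholders.
from google.cloud import spanner
from google.cloud.spanner_v1.database import BatchSnapshot

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

snapshot = database.batch_snapshot()
batches = list(snapshot.generate_query_batches("SELECT SingerId FROM Singers"))
handle = snapshot.to_dict()  # ship this, plus one batch, to each worker

# ... in a worker process:
worker_snapshot = BatchSnapshot.from_dict(database, handle)
for row in worker_snapshot.process_query_batch(batches[0]):
    print(row)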
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.SerializeToString, - ), - "Read": grpc.unary_unary_rpc_method_handler( - servicer.Read, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString, - ), - "StreamingRead": grpc.unary_stream_rpc_method_handler( - servicer.StreamingRead, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString, - ), - "BeginTransaction": grpc.unary_unary_rpc_method_handler( - servicer.BeginTransaction, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.SerializeToString, - ), - "Commit": grpc.unary_unary_rpc_method_handler( - servicer.Commit, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.SerializeToString, - ), - "Rollback": grpc.unary_unary_rpc_method_handler( - servicer.Rollback, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "PartitionQuery": grpc.unary_unary_rpc_method_handler( - servicer.PartitionQuery, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString, - ), - "PartitionRead": grpc.unary_unary_rpc_method_handler( - servicer.PartitionRead, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.v1.Spanner", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class Spanner(object): - """Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. 
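# Sketch: add_SpannerServicer_to_server() wires a servicer into a standard
# grpc.server, which is handy for backing tests with a fake. The FakeSpanner
# subclass here is a stand-in, not part of this patch.
from concurrent import futures

import grpc
from google.cloud.spanner_v1.proto import spanner_pb2, spanner_pb2_grpc

class FakeSpanner(spanner_pb2_grpc.SpannerServicer):
    def GetSession(self, request, context):
        return spanner_pb2.Session(name=request.name)

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
spanner_pb2_grpc.add_SpannerServicer_to_server(FakeSpanner(), server)
port = server.add_insecure_port("[::]:0")  # OS-assigned port
server.start()
# ... point a SpannerStub at "localhost:%d" % port ...
server.stop(grace=None)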
- """ - - @staticmethod - def CreateSession( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/CreateSession", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def BatchCreateSessions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/BatchCreateSessions", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetSession( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/GetSession", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListSessions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/ListSessions", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteSession( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/DeleteSession", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ExecuteSql( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/ExecuteSql", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - 
google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ExecuteStreamingSql( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ExecuteBatchDml( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/ExecuteBatchDml", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def Read( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/Read", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def StreamingRead( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.spanner.v1.Spanner/StreamingRead", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def BeginTransaction( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/BeginTransaction", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def Commit( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - 
request, - target, - "/google.spanner.v1.Spanner/Commit", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def Rollback( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/Rollback", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def PartitionQuery( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/PartitionQuery", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def PartitionRead( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.spanner.v1.Spanner/PartitionRead", - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 0bcbfcf900..5c6f494474 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -28,283 +28,9 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// # Transactions +// TransactionOptions are used to specify different types of transactions. // -// -// Each session can have at most one active transaction at a time. After the -// active transaction is completed, the session can immediately be -// re-used for the next transaction. It is not necessary to create a -// new session for each transaction. -// -// # Transaction Modes -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. 
Snapshot read-only transactions can be configured to -// read at timestamps in the past. Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. -// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. -// -// ### Semantics -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// ### Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (e.g., many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. 
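The retry loop described in this comment is implemented for you by this library's `Database.run_in_transaction` helper, which re-runs the work function whenever Cloud Spanner aborts the transaction. A minimal sketch, assuming an existing instance and database (the IDs, table, and column names below are placeholders):

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    def debit(transaction):
        # Runs inside one read-write transaction; on ABORTED the whole
        # function is re-invoked in the same session, so each retry
        # inherits the session's increased lock priority.
        return transaction.execute_update(
            "UPDATE Accounts SET Balance = Balance - 100 WHERE AccountId = 1"
        )

    row_count = database.run_in_transaction(debit)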
Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time spent -// retrying. -// -// ### Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. In that case, the commit will -// fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provide a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. -// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transactions, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// ### Strong -// -// Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// ### Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp.
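At the client level, these timestamp bounds map onto keyword arguments of `Database.snapshot`. A minimal sketch of an exact-staleness read, assuming an existing instance and database (IDs, table, and columns are placeholders):

    import datetime

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Every read in this snapshot observes the database as it was
    # 15 seconds before the read started.
    staleness = datetime.timedelta(seconds=15)
    with database.snapshot(exact_staleness=staleness) as snapshot:
        rows = list(snapshot.execute_sql("SELECT AccountId, Balance FROM Accounts"))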
They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. -// -// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// ### Bounded Staleness -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two-phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. -// -// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. -// -// ### Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamps become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions.
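This library surfaces the mode through `Database.execute_partitioned_dml`. A minimal sketch, assuming an existing instance and database (IDs, table, and statement are placeholders; the statement must be idempotent since it may be applied more than once per partition):

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Executed as a Partitioned DML transaction: the key space is
    # partitioned and the statement runs over each partition in
    # separate, internal transactions that commit independently.
    row_count = database.execute_partitioned_dml(
        "UPDATE Accounts SET Flagged = FALSE WHERE TRUE"
    )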
These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that the statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. -// -// Given the above, Partitioned DML is a good fit for large, database-wide -// operations that are idempotent, such as deleting old rows from a very large -// table. +// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. diff --git a/google/cloud/spanner_v1/proto/transaction_pb2.py b/google/cloud/spanner_v1/proto/transaction_pb2.py deleted file mode 100644 index 865a2446ad..0000000000 --- a/google/cloud/spanner_v1/proto/transaction_pb2.py +++ /dev/null @@ -1,1028 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: google/cloud/spanner_v1/proto/transaction.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/transaction.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\020TransactionProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n/google/cloud/spanner_v1/proto/transaction.proto\x12\x11google.spanner.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xc3\x04\n\x12TransactionOptions\x12\x45\n\nread_write\x18\x01 \x01(\x0b\x32/.google.spanner.v1.TransactionOptions.ReadWriteH\x00\x12O\n\x0fpartitioned_dml\x18\x03 \x01(\x0b\x32\x34.google.spanner.v1.TransactionOptions.PartitionedDmlH\x00\x12\x43\n\tread_only\x18\x02 \x01(\x0b\x32..google.spanner.v1.TransactionOptions.ReadOnlyH\x00\x1a\x0b\n\tReadWrite\x1a\x10\n\x0ePartitionedDml\x1a\xa8\x02\n\x08ReadOnly\x12\x10\n\x06strong\x18\x01 \x01(\x08H\x00\x12\x38\n\x12min_read_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x32\n\rmax_staleness\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x34\n\x0eread_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x34\n\x0f\x65xact_staleness\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x1d\n\x15return_read_timestamp\x18\x06 \x01(\x08\x42\x11\n\x0ftimestamp_boundB\x06\n\x04mode"M\n\x0bTransaction\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\x32\n\x0eread_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xa4\x01\n\x13TransactionSelector\x12;\n\nsingle_use\x18\x01 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12\x0c\n\x02id\x18\x02 \x01(\x0cH\x00\x12\x36\n\x05\x62\x65gin\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x42\n\n\x08selectorB\xb6\x01\n\x15\x63om.google.spanner.v1B\x10TransactionProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TRANSACTIONOPTIONS_READWRITE = _descriptor.Descriptor( - name="ReadWrite", - full_name="google.spanner.v1.TransactionOptions.ReadWrite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=409, - serialized_end=420, -) - -_TRANSACTIONOPTIONS_PARTITIONEDDML = _descriptor.Descriptor( - name="PartitionedDml", - 
full_name="google.spanner.v1.TransactionOptions.PartitionedDml", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=422, - serialized_end=438, -) - -_TRANSACTIONOPTIONS_READONLY = _descriptor.Descriptor( - name="ReadOnly", - full_name="google.spanner.v1.TransactionOptions.ReadOnly", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="strong", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.strong", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="min_read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_staleness", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.max_staleness", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="exact_staleness", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="return_read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - 
name="timestamp_bound", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.timestamp_bound", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=441, - serialized_end=737, -) - -_TRANSACTIONOPTIONS = _descriptor.Descriptor( - name="TransactionOptions", - full_name="google.spanner.v1.TransactionOptions", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="read_write", - full_name="google.spanner.v1.TransactionOptions.read_write", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="partitioned_dml", - full_name="google.spanner.v1.TransactionOptions.partitioned_dml", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="read_only", - full_name="google.spanner.v1.TransactionOptions.read_only", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _TRANSACTIONOPTIONS_READWRITE, - _TRANSACTIONOPTIONS_PARTITIONEDDML, - _TRANSACTIONOPTIONS_READONLY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mode", - full_name="google.spanner.v1.TransactionOptions.mode", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=166, - serialized_end=745, -) - - -_TRANSACTION = _descriptor.Descriptor( - name="Transaction", - full_name="google.spanner.v1.Transaction", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.spanner.v1.Transaction.id", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="read_timestamp", - full_name="google.spanner.v1.Transaction.read_timestamp", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=747, - serialized_end=824, -) - - -_TRANSACTIONSELECTOR = _descriptor.Descriptor( - name="TransactionSelector", - full_name="google.spanner.v1.TransactionSelector", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="single_use", - full_name="google.spanner.v1.TransactionSelector.single_use", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="id", - full_name="google.spanner.v1.TransactionSelector.id", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="begin", - full_name="google.spanner.v1.TransactionSelector.begin", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="selector", - full_name="google.spanner.v1.TransactionSelector.selector", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ) - ], - serialized_start=827, - serialized_end=991, -) - -_TRANSACTIONOPTIONS_READWRITE.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_PARTITIONEDDML.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "min_read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "max_staleness" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "exact_staleness" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_TRANSACTIONOPTIONS_READONLY.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["strong"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "strong" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["min_read_timestamp"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "min_read_timestamp" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["max_staleness"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "max_staleness" 
-].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["read_timestamp"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_timestamp" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["exact_staleness"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "exact_staleness" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].message_type = _TRANSACTIONOPTIONS_READWRITE -_TRANSACTIONOPTIONS.fields_by_name[ - "partitioned_dml" -].message_type = _TRANSACTIONOPTIONS_PARTITIONEDDML -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].message_type = _TRANSACTIONOPTIONS_READONLY -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["read_write"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["partitioned_dml"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "partitioned_dml" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["read_only"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTION.fields_by_name[ - "read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONSELECTOR.fields_by_name["single_use"].message_type = _TRANSACTIONOPTIONS -_TRANSACTIONSELECTOR.fields_by_name["begin"].message_type = _TRANSACTIONOPTIONS -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["single_use"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "single_use" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["id"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "id" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["begin"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "begin" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -DESCRIPTOR.message_types_by_name["TransactionOptions"] = _TRANSACTIONOPTIONS -DESCRIPTOR.message_types_by_name["Transaction"] = _TRANSACTION -DESCRIPTOR.message_types_by_name["TransactionSelector"] = _TRANSACTIONSELECTOR -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TransactionOptions = _reflection.GeneratedProtocolMessageType( - "TransactionOptions", - (_message.Message,), - { - "ReadWrite": _reflection.GeneratedProtocolMessageType( - "ReadWrite", - (_message.Message,), - { - "DESCRIPTOR": _TRANSACTIONOPTIONS_READWRITE, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """Message type to initiate a read-write transaction. 
Currently this - transaction type has no options.""", - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadWrite) - }, - ), - "PartitionedDml": _reflection.GeneratedProtocolMessageType( - "PartitionedDml", - (_message.Message,), - { - "DESCRIPTOR": _TRANSACTIONOPTIONS_PARTITIONEDDML, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """Message type to initiate a Partitioned DML transaction.""", - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.PartitionedDml) - }, - ), - "ReadOnly": _reflection.GeneratedProtocolMessageType( - "ReadOnly", - (_message.Message,), - { - "DESCRIPTOR": _TRANSACTIONOPTIONS_READONLY, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """Message type to initiate a read-only transaction. - - Attributes: - timestamp_bound: - How to choose the timestamp for the read-only transaction. - strong: - Read at a timestamp where all previously committed - transactions are visible. - min_read_timestamp: - Executes all reads at a timestamp >= ``min_read_timestamp``. - This is useful for requesting fresher data than some previous - read, or data that is fresh enough to observe the effects of - some previously committed transaction whose timestamp is - known. Note that this option can only be used in single-use - transactions. A timestamp in RFC3339 UTC "Zulu" format, - accurate to nanoseconds. Example: - ``"2014-10-02T15:01:23.045123456Z"``. - max_staleness: - Read data at a timestamp >= ``NOW - max_staleness`` seconds. - Guarantees that all writes that have committed more than the - specified number of seconds ago are visible. Because Cloud - Spanner chooses the exact timestamp, this mode works even if - the client’s local clock is substantially skewed from Cloud - Spanner commit timestamps. Useful for reading the freshest - data available at a nearby replica, while bounding the - possible staleness if the local replica has fallen behind. - Note that this option can only be used in single-use - transactions. - read_timestamp: - Executes all reads at the given timestamp. Unlike other modes, - reads at a specific timestamp are repeatable; the same read at - the same timestamp always returns the same data. If the - timestamp is in the future, the read will block until the - specified timestamp, modulo the read’s deadline. Useful for - large scale consistent reads such as mapreduces, or for - coordinating many reads against a consistent snapshot of the - data. A timestamp in RFC3339 UTC "Zulu" format, accurate to - nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. - exact_staleness: - Executes all reads at a timestamp that is ``exact_staleness`` - old. The timestamp is chosen soon after the read is started. - Guarantees that all writes that have committed more than the - specified number of seconds ago are visible. Because Cloud - Spanner chooses the exact timestamp, this mode works even if - the client’s local clock is substantially skewed from Cloud - Spanner commit timestamps. Useful for reading at nearby - replicas without the distributed timestamp negotiation - overhead of ``max_staleness``. - return_read_timestamp: - If true, the Cloud Spanner-selected read timestamp is included - in the [Transaction][google.spanner.v1.Transaction] message - that describes the transaction. 
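These ReadOnly fields correspond to keyword arguments accepted by ``Database.snapshot`` in this library. A minimal sketch of a repeatable read at an explicit timestamp, assuming an existing instance and database (IDs, timestamp, table, and columns are placeholders; the timestamp must be recent enough not to have been garbage collected):

    import datetime

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Reads at an explicit timestamp are repeatable: the same read at
    # the same timestamp always returns the same data.
    ts = datetime.datetime(2020, 11, 11, tzinfo=datetime.timezone.utc)
    with database.snapshot(read_timestamp=ts) as snapshot:
        rows = list(snapshot.execute_sql("SELECT AccountId, Balance FROM Accounts"))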
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadOnly) - }, - ), - "DESCRIPTOR": _TRANSACTIONOPTIONS, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """# Transactions - - Each session can have at most one active transaction at a time. After - the active transaction is completed, the session can immediately be - re-used for the next transaction. It is not necessary to create a new - session for each transaction. - - Transaction Modes - - - Cloud Spanner supports three transaction modes: - - 1. Locking read-write. This type of transaction is the only way to write - data into Cloud Spanner. These transactions rely on pessimistic - locking and, if necessary, two-phase commit. Locking read-write - transactions may abort, requiring the application to retry. - - 2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. Snapshot - read-only transactions can be configured to read at timestamps in the - past. Snapshot read-only transactions do not need to be committed. - - 3. Partitioned DML. This type of transaction is used to execute a single - Partitioned DML statement. Partitioned DML partitions the key space - and runs the DML statement over each partition in parallel using - separate, internal transactions that commit independently. - Partitioned DML transactions do not need to be committed. - - For transactions that only read, snapshot read-only transactions provide - simpler semantics and are almost always faster. In particular, read-only - transactions do not take locks, so they do not conflict with read-write - transactions. As a consequence of not taking locks, they also do not - abort, so retry loops are not needed. - - Transactions may only read/write data in a single database. They may, - however, read/write data in different tables within that database. - - Locking Read-Write Transactions - - - Locking transactions may be used to atomically read-modify-write data - anywhere in a database. This type of transaction is externally - consistent. - - Clients should attempt to minimize the amount of time a transaction is - active. Faster transactions commit with higher probability and cause - less contention. Cloud Spanner attempts to keep read locks active as - long as the transaction continues to do reads, and the transaction has - not been terminated by [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of - inactivity at the client may cause Cloud Spanner to release a - transaction’s locks and abort it. - - Conceptually, a read-write transaction consists of zero or more reads or - SQL statements followed by [Commit][google.spanner.v1.Spanner.Commit]. - At any time before [Commit][google.spanner.v1.Spanner.Commit], the - client can send a [Rollback][google.spanner.v1.Spanner.Rollback] request - to abort the transaction. - - Semantics - - - Cloud Spanner can commit the transaction if all read locks it acquired - are still valid at commit time, and it is able to acquire write locks - for all writes. Cloud Spanner can abort the transaction for any reason. - If a commit attempt returns ``ABORTED``, Cloud Spanner guarantees that - the transaction has not modified any user data in Cloud Spanner. - - Unless the transaction commits, Cloud Spanner makes no guarantees about - how long the transaction’s locks were held for. 
It is an error to use - Cloud Spanner locks for any sort of mutual exclusion other than between - Cloud Spanner transactions themselves. - - Retrying Aborted Transactions - - - When a transaction aborts, the application can choose to retry the whole - transaction again. To maximize the chances of successfully committing - the retry, the client should execute the retry in the same session as - the original attempt. The original session’s lock priority increases - with each consecutive abort, meaning that each attempt has a slightly - better chance of success than the previous. - - Under some circumstances (e.g., many transactions attempting to modify - the same row(s)), a transaction can abort many times in a short period - before successfully committing. Thus, it is not a good idea to cap the - number of retries a transaction can attempt; instead, it is better to - limit the total amount of wall time spent retrying. - - Idle Transactions - - - A transaction is considered idle if it has no outstanding reads or SQL - queries and has not started a read or SQL query within the last 10 - seconds. Idle transactions can be aborted by Cloud Spanner so that they - don’t hold on to locks indefinitely. In that case, the commit will fail - with error ``ABORTED``. - - If this behavior is undesirable, periodically executing a simple SQL - query in the transaction (e.g., ``SELECT 1``) prevents the transaction - from becoming idle. - - Snapshot Read-Only Transactions - - - Snapshot read-only transactions provide a simpler method than locking - read-write transactions for doing several consistent reads. However, - this type of transaction does not support writes. - - Snapshot transactions do not take locks. Instead, they work by choosing - a Cloud Spanner timestamp, then executing all reads at that timestamp. - Since they do not acquire locks, they do not block concurrent read-write - transactions. - - Unlike locking read-write transactions, snapshot read-only transactions - never abort. They can fail if the chosen read timestamp is garbage - collected; however, the default garbage collection policy is generous - enough that most applications do not need to worry about this in - practice. - - Snapshot read-only transactions do not need to call - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not - permitted to do so). - - To execute a snapshot transaction, the client specifies a timestamp - bound, which tells Cloud Spanner how to choose a read timestamp. - - The types of timestamp bound are: - - - Strong (the default). - - Bounded staleness. - - Exact staleness. - - If the Cloud Spanner database to be read is geographically distributed, - stale read-only transactions can execute more quickly than strong or - read-write transactions, because they are able to execute far from the - leader replica. - - Each type of timestamp bound is discussed in detail below. - - Strong - - - Strong reads are guaranteed to see the effects of all transactions that - have committed before the start of the read. Furthermore, all rows - yielded by a single read are consistent with each other -- if any part - of the read observes a transaction, all parts of the read see the - transaction. - - Strong reads are not repeatable: two consecutive strong read-only - transactions might return inconsistent results if there are concurrent - writes. If consistency across reads is required, the reads should be - executed within a transaction or at an exact read timestamp.
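When consistency across several reads is required, this library's multi-use snapshots run all of their reads at one strong timestamp. A minimal sketch, assuming an existing instance and database (IDs, tables, and columns are placeholders):

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Both queries execute at the same strong read timestamp, so their
    # results are consistent with each other.
    with database.snapshot(multi_use=True) as snapshot:
        accounts = list(snapshot.execute_sql("SELECT AccountId FROM Accounts"))
        history = list(snapshot.execute_sql("SELECT AccountId FROM AccountHistory"))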
- - See - [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - - Exact Staleness - - - These timestamp bounds execute reads at a user-specified timestamp. - Reads at a timestamp are guaranteed to see a consistent prefix of the - global transaction history: they observe modifications done by all - transactions with a commit timestamp <= the read timestamp, and observe - none of the modifications done by transactions with a larger commit - timestamp. They will block until all conflicting transactions that may - be assigned commit timestamps <= the read timestamp have finished. - - The timestamp can either be expressed as an absolute Cloud Spanner - commit timestamp or a staleness relative to the current time. - - These modes do not require a "negotiation phase" to pick a timestamp. As - a result, they execute slightly faster than the equivalent boundedly - stale concurrency modes. On the other hand, boundedly stale reads - usually return fresher results. - - See - [TransactionOptions.ReadOnly.read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read\_timestamp] - and - [TransactionOptions.ReadOnly.exact\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact\_staleness]. - - Bounded Staleness - - - Bounded staleness modes allow Cloud Spanner to pick the read timestamp, - subject to a user-provided staleness bound. Cloud Spanner chooses the - newest timestamp within the staleness bound that allows execution of the - reads at the closest available replica without blocking. - - All rows yielded are consistent with each other -- if any part of the - read observes a transaction, all parts of the read see the transaction. - Boundedly stale reads are not repeatable: two stale reads, even if they - use the same staleness bound, can execute at different timestamps and - thus return inconsistent results. - - Boundedly stale reads execute in two phases: the first phase negotiates - a timestamp among all replicas needed to serve the read. In the second - phase, reads are executed at the negotiated timestamp. - - As a result of the two-phase execution, bounded staleness reads are - usually a little slower than comparable exact staleness reads. However, - they are typically able to return fresher results, and are more likely - to execute at the closest replica. - - Because the timestamp negotiation requires up-front knowledge of which - rows will be read, it can only be used with single-use read-only - transactions. - - See - [TransactionOptions.ReadOnly.max\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max\_staleness] - and - [TransactionOptions.ReadOnly.min\_read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min\_read\_timestamp]. - - Old Read Timestamps and Garbage Collection - - - Cloud Spanner continuously garbage collects deleted and overwritten data - in the background to reclaim storage space. This process is known as - "version GC". By default, version GC reclaims versions after they are - one hour old. Because of this, Cloud Spanner cannot perform reads at - read timestamps more than one hour in the past. This restriction also - applies to in-progress reads and/or SQL queries whose timestamps become - too old while executing. Reads and SQL queries with too-old read - timestamps fail with the error ``FAILED_PRECONDITION``.
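A bounded-staleness counterpart using ``max_staleness``, again a sketch with placeholder IDs, table, and columns; note that, per the restriction above, this bound only works for single-use snapshots, so ``multi_use`` is left at its default of False:

    import datetime

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    # Cloud Spanner picks the newest timestamp within the bound that the
    # closest available replica can serve without blocking.
    bound = datetime.timedelta(seconds=10)
    with database.snapshot(max_staleness=bound) as snapshot:
        rows = list(snapshot.execute_sql("SELECT AccountId, Balance FROM Accounts"))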
- - Partitioned DML Transactions - - - Partitioned DML transactions are used to execute DML statements with a - different execution strategy that provides different, and often better, - scalability properties for large, table-wide operations than DML in a - ReadWrite transaction. Smaller scoped statements, such as an OLTP - workload, should prefer using ReadWrite transactions. - - Partitioned DML partitions the keyspace and runs the DML statement on - each partition in separate, internal transactions. These transactions - commit automatically when complete, and run independently from one - another. - - To reduce lock contention, this execution strategy only acquires read - locks on rows that match the WHERE clause of the statement. - Additionally, the smaller per-partition transactions hold locks for less - time. - - That said, Partitioned DML is not a drop-in replacement for standard DML - used in ReadWrite transactions. - - - The DML statement must be fully-partitionable. Specifically, the - statement must be expressible as the union of many statements which - each access only a single row of the table. - - - The statement is not applied atomically to all rows of the table. - Rather, the statement is applied atomically to partitions of the - table, in independent transactions. Secondary index rows are updated - atomically with the base table rows. - - - Partitioned DML does not guarantee exactly-once execution semantics - against a partition. The statement will be applied at least once to - each partition. It is strongly recommended that the DML statement - should be idempotent to avoid unexpected results. For instance, it is - potentially dangerous to run a statement such as - ``UPDATE table SET column = column + 1`` as it could be run multiple - times against some rows. - - - The partitions are committed automatically - there is no support for - Commit or Rollback. If the call returns an error, or if the client - issuing the ExecuteSql call dies, it is possible that some rows had - the statement executed on them successfully. It is also possible that - the statement was never executed against other rows. - - - Partitioned DML transactions may only contain the execution of a - single DML statement via ExecuteSql or ExecuteStreamingSql. - - - If any error is encountered during the execution of the partitioned - DML operation (for instance, a UNIQUE INDEX violation, division by - zero, or a value that cannot be stored due to schema constraints), - then the operation is stopped at that point and an error is returned. - It is possible that at this point, some partitions have been - committed (or even committed multiple times), and other partitions - have not been run at all. - - Given the above, Partitioned DML is a good fit for large, database-wide - operations that are idempotent, such as deleting old rows from a very - large table. - - Attributes: - mode: - Required. The type of transaction. - read_write: - Transaction may write. Authorization to begin a read-write - transaction requires - ``spanner.databases.beginOrRollbackReadWriteTransaction`` - permission on the ``session`` resource. - partitioned_dml: - Partitioned DML transaction. Authorization to begin a - Partitioned DML transaction requires - ``spanner.databases.beginPartitionedDmlTransaction`` - permission on the ``session`` resource. - read_only: - Transaction will not write. Authorization to begin a read- - only transaction requires - ``spanner.databases.beginReadOnlyTransaction`` permission on - the ``session`` resource.
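After this migration the message itself is exposed as a proto-plus type from ``google.cloud.spanner_v1``; a sketch of constructing the options directly (most applications should prefer the higher-level ``Database`` helpers shown earlier):

    from google.cloud.spanner_v1 import TransactionOptions, TransactionSelector

    # A single-use, strong read-only transaction that reports the read
    # timestamp it was assigned.
    options = TransactionOptions(
        read_only=TransactionOptions.ReadOnly(strong=True, return_read_timestamp=True)
    )
    selector = TransactionSelector(single_use=options)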
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions) - }, -) -_sym_db.RegisterMessage(TransactionOptions) -_sym_db.RegisterMessage(TransactionOptions.ReadWrite) -_sym_db.RegisterMessage(TransactionOptions.PartitionedDml) -_sym_db.RegisterMessage(TransactionOptions.ReadOnly) - -Transaction = _reflection.GeneratedProtocolMessageType( - "Transaction", - (_message.Message,), - { - "DESCRIPTOR": _TRANSACTION, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """A transaction. - - Attributes: - id: - \ ``id`` may be used to identify the transaction in subsequent - [Read][google.spanner.v1.Spanner.Read], - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], - [Commit][google.spanner.v1.Spanner.Commit], or - [Rollback][google.spanner.v1.Spanner.Rollback] calls. Single- - use read-only transactions do not have IDs, because single-use - transactions do not support multiple requests. - read_timestamp: - For snapshot read-only transactions, the read timestamp chosen - for the transaction. Not returned by default: see [Transaction - Options.ReadOnly.return\_read\_timestamp][google.spanner.v1.Tr - ansactionOptions.ReadOnly.return\_read\_timestamp]. A - timestamp in RFC3339 UTC "Zulu" format, accurate to - nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Transaction) - }, -) -_sym_db.RegisterMessage(Transaction) - -TransactionSelector = _reflection.GeneratedProtocolMessageType( - "TransactionSelector", - (_message.Message,), - { - "DESCRIPTOR": _TRANSACTIONSELECTOR, - "__module__": "google.cloud.spanner_v1.proto.transaction_pb2", - "__doc__": """This message is used to select the transaction in which a - [Read][google.spanner.v1.Spanner.Read] or - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. - - See [TransactionOptions][google.spanner.v1.TransactionOptions] for more - information about transactions. - - Attributes: - selector: - If no fields are set, the default is a single use transaction - with strong concurrency. - single_use: - Execute the read or SQL query in a temporary transaction. This - is the most efficient way to execute a transaction that - consists of a single SQL query. - id: - Execute the read or SQL query in a previously-started - transaction. - begin: - Begin a new transaction and execute this read or SQL query in - it. The transaction ID of the new transaction is returned in [ - ResultSetMetadata.transaction][google.spanner.v1.ResultSetMeta - data.transaction], which is a - [Transaction][google.spanner.v1.Transaction]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionSelector) - }, -) -_sym_db.RegisterMessage(TransactionSelector) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py b/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/proto/type.proto b/google/cloud/spanner_v1/proto/type.proto index 1e5e5ff313..1b863c0fdf 100644 --- a/google/cloud/spanner_v1/proto/type.proto +++ b/google/cloud/spanner_v1/proto/type.proto @@ -50,7 +50,7 @@ message StructType { // SQL queries, it is the column alias (e.g., `"Word"` in the // query `"SELECT 'hello' AS Word"`), or the column name (e.g., // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some - // columns might have an empty name (e.g., !"SELECT + // columns might have an empty name (e.g., `"SELECT // UPPER(ColName)"`). Note that a query result can contain // multiple fields with the same name. string name = 1; diff --git a/google/cloud/spanner_v1/proto/type_pb2.py b/google/cloud/spanner_v1/proto/type_pb2.py deleted file mode 100644 index 8e763fd247..0000000000 --- a/google/cloud/spanner_v1/proto/type_pb2.py +++ /dev/null @@ -1,418 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/spanner_v1/proto/type.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/type.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=b"\n\025com.google.spanner.v1B\tTypeProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n(google/cloud/spanner_v1/proto/type.proto\x12\x11google.spanner.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1cgoogle/api/annotations.proto"\x9f\x01\n\x04Type\x12.\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1b.google.spanner.v1.TypeCodeB\x03\xe0\x41\x02\x12\x33\n\x12\x61rray_element_type\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type\x12\x32\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\x1d.google.spanner.v1.StructType"\x7f\n\nStructType\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.spanner.v1.StructType.Field\x1a<\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type*\x9b\x01\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x08\n\x04\x42OOL\x10\x01\x12\t\n\x05INT64\x10\x02\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\x08\n\x04\x44\x41TE\x10\x05\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x42YTES\x10\x07\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0b\n\x07NUMERIC\x10\nB\xaf\x01\n\x15\x63om.google.spanner.v1B\tTypeProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x02\x1aGoogle::Cloud::Spanner::V1b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - -_TYPECODE = _descriptor.EnumDescriptor( - name="TypeCode", - 
full_name="google.spanner.v1.TypeCode", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_CODE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BOOL", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INT64", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FLOAT64", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TIMESTAMP", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DATE", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STRING", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BYTES", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ARRAY", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STRUCT", - index=9, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NUMERIC", - index=10, - number=10, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=418, - serialized_end=573, -) -_sym_db.RegisterEnumDescriptor(_TYPECODE) - -TypeCode = enum_type_wrapper.EnumTypeWrapper(_TYPECODE) -TYPE_CODE_UNSPECIFIED = 0 -BOOL = 1 -INT64 = 2 -FLOAT64 = 3 -TIMESTAMP = 4 -DATE = 5 -STRING = 6 -BYTES = 7 -ARRAY = 8 -STRUCT = 9 -NUMERIC = 10 - - -_TYPE = _descriptor.Descriptor( - name="Type", - full_name="google.spanner.v1.Type", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="code", - full_name="google.spanner.v1.Type.code", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="array_element_type", - full_name="google.spanner.v1.Type.array_element_type", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="struct_type", - full_name="google.spanner.v1.Type.struct_type", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - 
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=127, - serialized_end=286, -) - - -_STRUCTTYPE_FIELD = _descriptor.Descriptor( - name="Field", - full_name="google.spanner.v1.StructType.Field", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.v1.StructType.Field.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.v1.StructType.Field.type", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=355, - serialized_end=415, -) - -_STRUCTTYPE = _descriptor.Descriptor( - name="StructType", - full_name="google.spanner.v1.StructType", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.spanner.v1.StructType.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ) - ], - extensions=[], - nested_types=[_STRUCTTYPE_FIELD], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=288, - serialized_end=415, -) - -_TYPE.fields_by_name["code"].enum_type = _TYPECODE -_TYPE.fields_by_name["array_element_type"].message_type = _TYPE -_TYPE.fields_by_name["struct_type"].message_type = _STRUCTTYPE -_STRUCTTYPE_FIELD.fields_by_name["type"].message_type = _TYPE -_STRUCTTYPE_FIELD.containing_type = _STRUCTTYPE -_STRUCTTYPE.fields_by_name["fields"].message_type = _STRUCTTYPE_FIELD -DESCRIPTOR.message_types_by_name["Type"] = _TYPE -DESCRIPTOR.message_types_by_name["StructType"] = _STRUCTTYPE -DESCRIPTOR.enum_types_by_name["TypeCode"] = _TYPECODE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Type = _reflection.GeneratedProtocolMessageType( - "Type", - (_message.Message,), - { - "DESCRIPTOR": _TYPE, - "__module__": "google.cloud.spanner_v1.proto.type_pb2", - "__doc__": """\ ``Type`` indicates the type of a Cloud Spanner value, as might be - stored in a table cell or returned from an SQL query. - - Attributes: - code: - Required. 
The [TypeCode][google.spanner.v1.TypeCode] for this - type. - array_element_type: - If [code][google.spanner.v1.Type.code] == - [ARRAY][google.spanner.v1.TypeCode.ARRAY], then - ``array_element_type`` is the type of the array elements. - struct_type: - If [code][google.spanner.v1.Type.code] == - [STRUCT][google.spanner.v1.TypeCode.STRUCT], then - ``struct_type`` provides type information for the struct’s - fields. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Type) - }, -) -_sym_db.RegisterMessage(Type) - -StructType = _reflection.GeneratedProtocolMessageType( - "StructType", - (_message.Message,), - { - "Field": _reflection.GeneratedProtocolMessageType( - "Field", - (_message.Message,), - { - "DESCRIPTOR": _STRUCTTYPE_FIELD, - "__module__": "google.cloud.spanner_v1.proto.type_pb2", - "__doc__": """Message representing a single field of a struct. - - Attributes: - name: - The name of the field. For reads, this is the column name. For - SQL queries, it is the column alias (e.g., ``"Word"`` in the - query ``"SELECT 'hello' AS Word"``), or the column name (e.g., - ``"ColName"`` in the query ``"SELECT ColName FROM Table"``). - Some columns might have an empty name (e.g., !“SELECT - UPPER(ColName)”\`). Note that a query result can contain - multiple fields with the same name. - type: - The type of the field. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.StructType.Field) - }, - ), - "DESCRIPTOR": _STRUCTTYPE, - "__module__": "google.cloud.spanner_v1.proto.type_pb2", - "__doc__": """\ ``StructType`` defines the fields of a - [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. - - Attributes: - fields: - The list of fields that make up this struct. Order is - significant, because values of this struct type are - represented as lists, where the order of field values matches - the order of fields in the - [StructType][google.spanner.v1.StructType]. In turn, the order - of fields matches the order of columns in a read request, or - the order of fields in the ``SELECT`` clause of a query. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.StructType) - }, -) -_sym_db.RegisterMessage(StructType) -_sym_db.RegisterMessage(StructType.Field) - - -DESCRIPTOR._options = None -_TYPE.fields_by_name["code"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_v1/proto/type_pb2_grpc.py b/google/cloud/spanner_v1/proto/type_pb2_grpc.py deleted file mode 100644 index 8a9393943b..0000000000 --- a/google/cloud/spanner_v1/proto/type_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/spanner_v1/py.typed b/google/cloud/spanner_v1/py.typed new file mode 100644 index 0000000000..0989eccd04 --- /dev/null +++ b/google/cloud/spanner_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-spanner package uses inline types. diff --git a/google/cloud/spanner_v1/services/__init__.py b/google/cloud/spanner_v1/services/__init__.py new file mode 100644 index 0000000000..42ffdf2bc4 --- /dev/null +++ b/google/cloud/spanner_v1/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/spanner_v1/services/spanner/__init__.py b/google/cloud/spanner_v1/services/spanner/__init__.py new file mode 100644 index 0000000000..d00c69053d --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import SpannerClient +from .async_client import SpannerAsyncClient + +__all__ = ( + "SpannerClient", + "SpannerAsyncClient", +) diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py new file mode 100644 index 0000000000..ab84b7d885 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -0,0 +1,1402 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
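The ``services`` package added here replaces the deleted ``gapic``/``proto`` modules as the generated surface, and ``services/spanner/__init__.py`` exports both client flavors. A minimal sketch of the new entry points, assuming default credentials; the database path is illustrative, and the async variant defined in the file that follows requires a running event loop:

```python
# Sketch only: the database path is made up; default credentials are assumed.
import asyncio

from google.cloud.spanner_v1.services.spanner import SpannerAsyncClient


async def main():
    client = SpannerAsyncClient()
    # Flattened arguments mirror the request fields, as in the code below.
    session = await client.create_session(
        database="projects/my-project/instances/my-instance/databases/my-database"
    )
    print(session.name)
    await client.delete_session(name=session.name)


asyncio.run(main())
```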
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.spanner_v1.services.spanner import pagers +from google.cloud.spanner_v1.types import mutation +from google.cloud.spanner_v1.types import result_set +from google.cloud.spanner_v1.types import spanner +from google.cloud.spanner_v1.types import transaction +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import SpannerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpannerGrpcAsyncIOTransport +from .client import SpannerClient + + +class SpannerAsyncClient: + """Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. + """ + + _client: SpannerClient + + DEFAULT_ENDPOINT = SpannerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpannerClient.DEFAULT_MTLS_ENDPOINT + + database_path = staticmethod(SpannerClient.database_path) + parse_database_path = staticmethod(SpannerClient.parse_database_path) + session_path = staticmethod(SpannerClient.session_path) + parse_session_path = staticmethod(SpannerClient.parse_session_path) + + common_billing_account_path = staticmethod( + SpannerClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + SpannerClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(SpannerClient.common_folder_path) + parse_common_folder_path = staticmethod(SpannerClient.parse_common_folder_path) + + common_organization_path = staticmethod(SpannerClient.common_organization_path) + parse_common_organization_path = staticmethod( + SpannerClient.parse_common_organization_path + ) + + common_project_path = staticmethod(SpannerClient.common_project_path) + parse_common_project_path = staticmethod(SpannerClient.parse_common_project_path) + + common_location_path = staticmethod(SpannerClient.common_location_path) + parse_common_location_path = staticmethod(SpannerClient.parse_common_location_path) + + from_service_account_file = SpannerClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SpannerTransport: + """Return the transport used by the client instance. + + Returns: + SpannerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(SpannerClient).get_transport_class, type(SpannerClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, SpannerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the spanner client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpannerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = SpannerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_session( + self, + request: spanner.CreateSessionRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.Session: + r"""Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner + database. Sessions are meant to be reused for many consecutive + transactions. + + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good + idea to delete idle and unneeded sessions. Aside from explicit + deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is + deleted, requests to it return ``NOT_FOUND``. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., ``"SELECT 1"``. + + Args: + request (:class:`~.spanner.CreateSessionRequest`): + The request object. The request for + [CreateSession][google.spanner.v1.Spanner.CreateSession]. + database (:class:`str`): + Required. The database in which the + new session is created. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.Session: + A session in the Cloud Spanner API. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.CreateSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def batch_create_sessions( + self, + request: spanner.BatchCreateSessionsRequest = None, + *, + database: str = None, + session_count: int = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.BatchCreateSessionsResponse: + r"""Creates multiple new sessions. + This API can be used to initialize a session cache on + the clients. See https://goo.gl/TgSFN2 for best + practices on session cache management. + + Args: + request (:class:`~.spanner.BatchCreateSessionsRequest`): + The request object. The request for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + database (:class:`str`): + Required. The database in which the + new sessions are created. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + session_count (:class:`int`): + Required. The number of sessions to be created in this + batch call. The API may return fewer than the requested + number of sessions. If a specific number of sessions are + desired, the client can make additional calls to + BatchCreateSessions (adjusting + [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + as necessary). + This corresponds to the ``session_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.BatchCreateSessionsResponse: + The response for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, session_count]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = spanner.BatchCreateSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + if session_count is not None: + request.session_count = session_count + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_sessions, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_session( + self, + request: spanner.GetSessionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.Session: + r"""Gets a session. Returns ``NOT_FOUND`` if the session does not + exist. This is mainly useful for determining whether a session + is still alive. + + Args: + request (:class:`~.spanner.GetSessionRequest`): + The request object. The request for + [GetSession][google.spanner.v1.Spanner.GetSession]. + name (:class:`str`): + Required. The name of the session to + retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.Session: + A session in the Cloud Spanner API. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.GetSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_sessions( + self, + request: spanner.ListSessionsRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSessionsAsyncPager: + r"""Lists all sessions in a given database. + + Args: + request (:class:`~.spanner.ListSessionsRequest`): + The request object. The request for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + database (:class:`str`): + Required. The database in which to + list sessions. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListSessionsAsyncPager: + The response for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.ListSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_sessions, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSessionsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_session( + self, + request: spanner.DeleteSessionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Ends a session, releasing server resources associated + with it. This will asynchronously trigger cancellation + of any operations that are running with this session. + + Args: + request (:class:`~.spanner.DeleteSessionRequest`): + The request object. The request for + [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. + name (:class:`str`): + Required. The name of the session to + delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.DeleteSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def execute_sql( + self, + request: spanner.ExecuteSqlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> result_set.ResultSet: + r"""Executes an SQL statement, returning all results in a single + reply. This method cannot be used to return a result set larger + than 10 MiB; if the query yields more data than that, the query + fails with a ``FAILED_PRECONDITION`` error. + + Operations inside read-write transactions might return + ``ABORTED``. If this occurs, the application should restart the + transaction from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be fetched in streaming fashion by + calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + + Args: + request (:class:`~.spanner.ExecuteSqlRequest`): + The request object. The request for + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.result_set.ResultSet: + Results from [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + """ + # Create or coerce a protobuf request object. + + request = spanner.ExecuteSqlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.execute_sql, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def execute_streaming_sql( + self, + request: spanner.ExecuteSqlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[result_set.PartialResultSet]]: + r"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except + returns the result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no + limit on the size of the returned result set. However, no + individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. + + Args: + request (:class:`~.spanner.ExecuteSqlRequest`): + The request object. The request for + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[~.result_set.PartialResultSet]: + Partial results from a streaming read + or SQL query. Streaming reads and SQL + queries better tolerate large result + sets, large rows, and large values, but + are a little trickier to consume. + + """ + # Create or coerce a protobuf request object. + + request = spanner.ExecuteSqlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.execute_streaming_sql, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def execute_batch_dml( + self, + request: spanner.ExecuteBatchDmlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.ExecuteBatchDmlResponse: + r"""Executes a batch of SQL DML statements. This method allows many + statements to be run with lower latency than submitting them + sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can + succeed even if a statement fails. The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement + that failed. Clients must inspect this field to determine + whether an error occurred. 
+ + Execution stops after the first failed statement; the remaining + statements are not executed. + + Args: + request (:class:`~.spanner.ExecuteBatchDmlRequest`): + The request object. The request for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.ExecuteBatchDmlResponse: + The response for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + Contains a list of + [ResultSet][google.spanner.v1.ResultSet] messages, one + for each DML statement that has successfully executed, + in the same order as the statements in the request. If a + statement fails, the status in the response body + identifies the cause of the failure. + + To check for DML statements that failed, use the + following approach: + + 1. Check the status in the response message. The + [google.rpc.Code][google.rpc.Code] enum value ``OK`` + indicates that all statements were executed + successfully. + 2. If the status was not ``OK``, check the number of + result sets in the response. If the response contains + ``N`` [ResultSet][google.spanner.v1.ResultSet] + messages, then statement ``N+1`` in the request + failed. + + Example 1: + + - Request: 5 DML statements, all executed successfully. + - Response: 5 [ResultSet][google.spanner.v1.ResultSet] + messages, with the status ``OK``. + + Example 2: + + - Request: 5 DML statements. The third statement has a + syntax error. + - Response: 2 [ResultSet][google.spanner.v1.ResultSet] + messages, and a syntax error (``INVALID_ARGUMENT``) + status. The number of + [ResultSet][google.spanner.v1.ResultSet] messages + indicates that the third statement failed, and the + fourth and fifth statements were not executed. + + """ + # Create or coerce a protobuf request object. + + request = spanner.ExecuteBatchDmlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.execute_batch_dml, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def read( + self, + request: spanner.ReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> result_set.ResultSet: + r"""Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method + cannot be used to return a result set larger than 10 MiB; if the + read matches more data than that, the read fails with a + ``FAILED_PRECONDITION`` error. + + Reads inside read-write transactions might return ``ABORTED``. + If this occurs, the application should restart the transaction + from the beginning. 
See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be yielded in streaming fashion by + calling [StreamingRead][google.spanner.v1.Spanner.StreamingRead] + instead. + + Args: + request (:class:`~.spanner.ReadRequest`): + The request object. The request for + [Read][google.spanner.v1.Spanner.Read] and + [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.result_set.ResultSet: + Results from [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + """ + # Create or coerce a protobuf request object. + + request = spanner.ReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def streaming_read( + self, + request: spanner.ReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[result_set.PartialResultSet]]: + r"""Like [Read][google.spanner.v1.Spanner.Read], except returns the + result set as a stream. Unlike + [Read][google.spanner.v1.Spanner.Read], there is no limit on the + size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can + exceed 10 MiB. + + Args: + request (:class:`~.spanner.ReadRequest`): + The request object. The request for + [Read][google.spanner.v1.Spanner.Read] and + [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[~.result_set.PartialResultSet]: + Partial results from a streaming read + or SQL query. Streaming reads and SQL + queries better tolerate large result + sets, large rows, and large values, but + are a little trickier to consume. + + """ + # Create or coerce a protobuf request object. + + request = spanner.ReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_read, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def begin_transaction( + self, + request: spanner.BeginTransactionRequest = None, + *, + session: str = None, + options: transaction.TransactionOptions = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> transaction.Transaction: + r"""Begins a new transaction. This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new + transaction as a side-effect. + + Args: + request (:class:`~.spanner.BeginTransactionRequest`): + The request object. The request for + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + session (:class:`str`): + Required. The session in which the + transaction runs. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + options (:class:`~.transaction.TransactionOptions`): + Required. Options for the new + transaction. + This corresponds to the ``options`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.transaction.Transaction: + A transaction. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, options]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.BeginTransactionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if session is not None: + request.session = session + if options is not None: + request.options = options + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.begin_transaction, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def commit( + self, + request: spanner.CommitRequest = None, + *, + session: str = None, + transaction_id: bytes = None, + mutations: Sequence[mutation.Mutation] = None, + single_use_transaction: transaction.TransactionOptions = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.CommitResponse: + r"""Commits a transaction. 
The request includes the mutations to be + applied to rows in the database. + + ``Commit`` might return an ``ABORTED`` error. This can occur at + any time; commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If ``Commit`` returns ``ABORTED``, the caller should + re-attempt the transaction from the beginning, re-using the same + session. + + Args: + request (:class:`~.spanner.CommitRequest`): + The request object. The request for + [Commit][google.spanner.v1.Spanner.Commit]. + session (:class:`str`): + Required. The session in which the + transaction to be committed is running. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + transaction_id (:class:`bytes`): + Commit a previously-started + transaction. + This corresponds to the ``transaction_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (:class:`Sequence[~.mutation.Mutation]`): + The mutations to be executed when + this transaction commits. All mutations + are applied atomically, in the order + they appear in this list. + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + single_use_transaction (:class:`~.transaction.TransactionOptions`): + Execute mutations in a temporary transaction. Note that + unlike commit of a previously-started transaction, + commit with a temporary transaction is non-idempotent. + That is, if the ``CommitRequest`` is sent to Cloud + Spanner more than once (for instance, due to retries in + the application, or in the transport library), it is + possible that the mutations are executed more than once. + If this is undesirable, use + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] + and [Commit][google.spanner.v1.Spanner.Commit] instead. + This corresponds to the ``single_use_transaction`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.CommitResponse: + The response for + [Commit][google.spanner.v1.Spanner.Commit]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [session, transaction_id, mutations, single_use_transaction] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.CommitRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if session is not None: + request.session = session + if transaction_id is not None: + request.transaction_id = transaction_id + if single_use_transaction is not None: + request.single_use_transaction = single_use_transaction + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.commit, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def rollback( + self, + request: spanner.RollbackRequest = None, + *, + session: str = None, + transaction_id: bytes = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Rolls back a transaction, releasing any locks it holds. It is a + good idea to call this for any transaction that includes one or + more [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + ultimately decides not to commit. + + ``Rollback`` returns ``OK`` if it successfully aborts the + transaction, the transaction was already aborted, or the + transaction is not found. ``Rollback`` never returns + ``ABORTED``. + + Args: + request (:class:`~.spanner.RollbackRequest`): + The request object. The request for + [Rollback][google.spanner.v1.Spanner.Rollback]. + session (:class:`str`): + Required. The session in which the + transaction to roll back is running. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + transaction_id (:class:`bytes`): + Required. The transaction to roll + back. + This corresponds to the ``transaction_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, transaction_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.RollbackRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if session is not None: + request.session = session + if transaction_id is not None: + request.transaction_id = transaction_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.rollback, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
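+
+        # Because Rollback returns OK even when the transaction was already
+        # aborted or cannot be found, it is safe to call defensively. A
+        # minimal sketch, assuming ``client``, ``session_name``, ``txn`` and
+        # a ``committed`` flag exist:
+        #
+        #     try:
+        #         ...  # reads / ExecuteSql calls against the transaction
+        #     finally:
+        #         if not committed:
+        #             await client.rollback(
+        #                 session=session_name, transaction_id=txn.id
+        #             )
+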
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def partition_query( + self, + request: spanner.PartitionQueryRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.PartitionResponse: + r"""Creates a set of partition tokens that can be used to execute a + query operation in parallel. Each of the returned partition + tokens can be used by + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + to specify a subset of the query result to read. The same + session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and + the ExecuteSqlRequests that use the partition tokens. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the query, and the whole operation must be restarted + from the beginning. + + Args: + request (:class:`~.spanner.PartitionQueryRequest`): + The request object. The request for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.PartitionResponse: + The response for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + or + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + """ + # Create or coerce a protobuf request object. + + request = spanner.PartitionQueryRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partition_query, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def partition_read( + self, + request: spanner.PartitionReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.PartitionResponse: + r"""Creates a set of partition tokens that can be used to execute a + read operation in parallel. Each of the returned partition + tokens can be used by + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to + specify a subset of the read result to read. The same session + and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no + ordering guarantees on rows returned among the returned + partition tokens, or even within each individual StreamingRead + call issued with a partition_token. 
+ + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the read, and the whole operation must be restarted + from the beginning. + + Args: + request (:class:`~.spanner.PartitionReadRequest`): + The request object. The request for + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.PartitionResponse: + The response for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + or + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + """ + # Create or coerce a protobuf request object. + + request = spanner.PartitionReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partition_read, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-spanner",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpannerAsyncClient",) diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py new file mode 100644 index 0000000000..50e4792b76 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/client.py @@ -0,0 +1,1550 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.spanner_v1.services.spanner import pagers
+from google.cloud.spanner_v1.types import mutation
+from google.cloud.spanner_v1.types import result_set
+from google.cloud.spanner_v1.types import spanner
+from google.cloud.spanner_v1.types import transaction
+from google.protobuf import struct_pb2 as struct  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+from google.rpc import status_pb2 as status  # type: ignore
+
+from .transports.base import SpannerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import SpannerGrpcTransport
+from .transports.grpc_asyncio import SpannerGrpcAsyncIOTransport
+
+
+class SpannerClientMeta(type):
+    """Metaclass for the Spanner client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[SpannerTransport]]
+    _transport_registry["grpc"] = SpannerGrpcTransport
+    _transport_registry["grpc_asyncio"] = SpannerGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[SpannerTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class SpannerClient(metaclass=SpannerClientMeta):
+    """Cloud Spanner API
+    The Cloud Spanner API can be used to manage sessions and execute
+    transactions on data stored in Cloud Spanner databases.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "spanner.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> SpannerTransport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            SpannerTransport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def database_path(project: str, instance: str, database: str,) -> str:
+        """Return a fully-qualified database string."""
+        return "projects/{project}/instances/{instance}/databases/{database}".format(
+            project=project, instance=instance, database=database,
+        )
+
+    @staticmethod
+    def parse_database_path(path: str) -> Dict[str, str]:
+        """Parse a database path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def session_path(project: str, instance: str, database: str, session: str,) -> str:
+        """Return a fully-qualified session string."""
+        return "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}".format(
+            project=project, instance=instance, database=database, session=session,
+        )
+
+    @staticmethod
+    def parse_session_path(path: str) -> Dict[str, str]:
+        """Parse a session path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/sessions/(?P<session>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, SpannerTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the spanner client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.SpannerTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (client_options_lib.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
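+        # The GOOGLE_API_USE_CLIENT_CERTIFICATE contract described in the
+        # docstring can be exercised as below; an illustrative sketch only,
+        # and the certificate file names are placeholders:
+        #
+        #     def client_cert_source():
+        #         with open("client_cert.pem", "rb") as f:
+        #             cert = f.read()
+        #         with open("client_key.pem", "rb") as f:
+        #             key = f.read()
+        #         return cert, key
+        #
+        #     options = client_options_lib.ClientOptions(
+        #         client_cert_source=client_cert_source
+        #     )
+        #     client = SpannerClient(client_options=options)
+        #
+        # The callable is only consulted when the environment variable is
+        # set to "true", per the logic below.
+
+        # Create SSL credentials for mutual TLS if needed.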
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpannerTransport): + # transport is a SpannerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_session( + self, + request: spanner.CreateSessionRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.Session: + r"""Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner + database. Sessions are meant to be reused for many consecutive + transactions. + + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good + idea to delete idle and unneeded sessions. Aside from explicit + deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is + deleted, requests to it return ``NOT_FOUND``. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., ``"SELECT 1"``. + + Args: + request (:class:`~.spanner.CreateSessionRequest`): + The request object. The request for + [CreateSession][google.spanner.v1.Spanner.CreateSession]. + database (:class:`str`): + Required. The database in which the + new session is created. 
+ This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.Session: + A session in the Cloud Spanner API. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.CreateSessionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.CreateSessionRequest): + request = spanner.CreateSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def batch_create_sessions( + self, + request: spanner.BatchCreateSessionsRequest = None, + *, + database: str = None, + session_count: int = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.BatchCreateSessionsResponse: + r"""Creates multiple new sessions. + This API can be used to initialize a session cache on + the clients. See https://goo.gl/TgSFN2 for best + practices on session cache management. + + Args: + request (:class:`~.spanner.BatchCreateSessionsRequest`): + The request object. The request for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + database (:class:`str`): + Required. The database in which the + new sessions are created. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + session_count (:class:`int`): + Required. The number of sessions to be created in this + batch call. The API may return fewer than the requested + number of sessions. If a specific number of sessions are + desired, the client can make additional calls to + BatchCreateSessions (adjusting + [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + as necessary). + This corresponds to the ``session_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.spanner.BatchCreateSessionsResponse: + The response for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, session_count]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.BatchCreateSessionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.BatchCreateSessionsRequest): + request = spanner.BatchCreateSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + if session_count is not None: + request.session_count = session_count + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_sessions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_session( + self, + request: spanner.GetSessionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.Session: + r"""Gets a session. Returns ``NOT_FOUND`` if the session does not + exist. This is mainly useful for determining whether a session + is still alive. + + Args: + request (:class:`~.spanner.GetSessionRequest`): + The request object. The request for + [GetSession][google.spanner.v1.Spanner.GetSession]. + name (:class:`str`): + Required. The name of the session to + retrieve. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.Session: + A session in the Cloud Spanner API. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.GetSessionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.GetSessionRequest): + request = spanner.GetSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
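+
+        # GetSession is primarily a liveness probe, per the docstring. A
+        # minimal sketch, assuming ``client`` and ``session_name`` exist:
+        #
+        #     from google.api_core import exceptions as core_exceptions
+        #
+        #     try:
+        #         client.get_session(name=session_name)
+        #         alive = True
+        #     except core_exceptions.NotFound:
+        #         alive = False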
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_sessions( + self, + request: spanner.ListSessionsRequest = None, + *, + database: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSessionsPager: + r"""Lists all sessions in a given database. + + Args: + request (:class:`~.spanner.ListSessionsRequest`): + The request object. The request for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + database (:class:`str`): + Required. The database in which to + list sessions. + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListSessionsPager: + The response for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ListSessionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ListSessionsRequest): + request = spanner.ListSessionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if database is not None: + request.database = database + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_sessions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSessionsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
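+        #
+        # From the caller's side, iterating the pager resolves additional
+        # pages transparently. A sketch, with ``client`` and
+        # ``database_name`` assumed:
+        #
+        #     for session in client.list_sessions(database=database_name):
+        #         print(session.name)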
+ return response + + def delete_session( + self, + request: spanner.DeleteSessionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Ends a session, releasing server resources associated + with it. This will asynchronously trigger cancellation + of any operations that are running with this session. + + Args: + request (:class:`~.spanner.DeleteSessionRequest`): + The request object. The request for + [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. + name (:class:`str`): + Required. The name of the session to + delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.DeleteSessionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.DeleteSessionRequest): + request = spanner.DeleteSessionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_session] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def execute_sql( + self, + request: spanner.ExecuteSqlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> result_set.ResultSet: + r"""Executes an SQL statement, returning all results in a single + reply. This method cannot be used to return a result set larger + than 10 MiB; if the query yields more data than that, the query + fails with a ``FAILED_PRECONDITION`` error. + + Operations inside read-write transactions might return + ``ABORTED``. If this occurs, the application should restart the + transaction from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be fetched in streaming fashion by + calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + + Args: + request (:class:`~.spanner.ExecuteSqlRequest`): + The request object. The request for + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.result_set.ResultSet: + Results from [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ExecuteSqlRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ExecuteSqlRequest): + request = spanner.ExecuteSqlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.execute_sql] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def execute_streaming_sql( + self, + request: spanner.ExecuteSqlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[result_set.PartialResultSet]: + r"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except + returns the result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no + limit on the size of the returned result set. However, no + individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. + + Args: + request (:class:`~.spanner.ExecuteSqlRequest`): + The request object. The request for + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[~.result_set.PartialResultSet]: + Partial results from a streaming read + or SQL query. Streaming reads and SQL + queries better tolerate large result + sets, large rows, and large values, but + are a little trickier to consume. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ExecuteSqlRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ExecuteSqlRequest): + request = spanner.ExecuteSqlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.execute_streaming_sql] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
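+        #
+        # Consuming the stream from the caller's side; a sketch with
+        # ``client`` and ``session_name`` assumed. Note that values split
+        # across PartialResultSet messages (``chunked_value``) must be
+        # merged; the handwritten StreamedResultSet wrapper in this package
+        # performs that merging:
+        #
+        #     request = spanner.ExecuteSqlRequest(
+        #         session=session_name, sql="SELECT 1"
+        #     )
+        #     for partial in client.execute_streaming_sql(request=request):
+        #         values = partial.values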
+ return response + + def execute_batch_dml( + self, + request: spanner.ExecuteBatchDmlRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.ExecuteBatchDmlResponse: + r"""Executes a batch of SQL DML statements. This method allows many + statements to be run with lower latency than submitting them + sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can + succeed even if a statement fails. The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement + that failed. Clients must inspect this field to determine + whether an error occurred. + + Execution stops after the first failed statement; the remaining + statements are not executed. + + Args: + request (:class:`~.spanner.ExecuteBatchDmlRequest`): + The request object. The request for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.ExecuteBatchDmlResponse: + The response for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + Contains a list of + [ResultSet][google.spanner.v1.ResultSet] messages, one + for each DML statement that has successfully executed, + in the same order as the statements in the request. If a + statement fails, the status in the response body + identifies the cause of the failure. + + To check for DML statements that failed, use the + following approach: + + 1. Check the status in the response message. The + [google.rpc.Code][google.rpc.Code] enum value ``OK`` + indicates that all statements were executed + successfully. + 2. If the status was not ``OK``, check the number of + result sets in the response. If the response contains + ``N`` [ResultSet][google.spanner.v1.ResultSet] + messages, then statement ``N+1`` in the request + failed. + + Example 1: + + - Request: 5 DML statements, all executed successfully. + - Response: 5 [ResultSet][google.spanner.v1.ResultSet] + messages, with the status ``OK``. + + Example 2: + + - Request: 5 DML statements. The third statement has a + syntax error. + - Response: 2 [ResultSet][google.spanner.v1.ResultSet] + messages, and a syntax error (``INVALID_ARGUMENT``) + status. The number of + [ResultSet][google.spanner.v1.ResultSet] messages + indicates that the third statement failed, and the + fourth and fifth statements were not executed. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ExecuteBatchDmlRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ExecuteBatchDmlRequest): + request = spanner.ExecuteBatchDmlRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.execute_batch_dml] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. 
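+
+        # The two-step failure check described in the docstring, as code; a
+        # sketch, with ``client`` and a populated ``request`` assumed:
+        #
+        #     response = client.execute_batch_dml(request=request)
+        #     if response.status.code != 0:  # google.rpc.Code.OK == 0
+        #         # Statement ``len(response.result_sets)`` (0-based) failed;
+        #         # later statements were not executed.
+        #         failed_index = len(response.result_sets)
+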
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def read( + self, + request: spanner.ReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> result_set.ResultSet: + r"""Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method + cannot be used to return a result set larger than 10 MiB; if the + read matches more data than that, the read fails with a + ``FAILED_PRECONDITION`` error. + + Reads inside read-write transactions might return ``ABORTED``. + If this occurs, the application should restart the transaction + from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be yielded in streaming fashion by + calling [StreamingRead][google.spanner.v1.Spanner.StreamingRead] + instead. + + Args: + request (:class:`~.spanner.ReadRequest`): + The request object. The request for + [Read][google.spanner.v1.Spanner.Read] and + [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.result_set.ResultSet: + Results from [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ReadRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ReadRequest): + request = spanner.ReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def streaming_read( + self, + request: spanner.ReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[result_set.PartialResultSet]: + r"""Like [Read][google.spanner.v1.Spanner.Read], except returns the + result set as a stream. Unlike + [Read][google.spanner.v1.Spanner.Read], there is no limit on the + size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can + exceed 10 MiB. + + Args: + request (:class:`~.spanner.ReadRequest`): + The request object. The request for + [Read][google.spanner.v1.Spanner.Read] and + [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + Iterable[~.result_set.PartialResultSet]: + Partial results from a streaming read + or SQL query. Streaming reads and SQL + queries better tolerate large result + sets, large rows, and large values, but + are a little trickier to consume. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.ReadRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.ReadRequest): + request = spanner.ReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.streaming_read] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def begin_transaction( + self, + request: spanner.BeginTransactionRequest = None, + *, + session: str = None, + options: transaction.TransactionOptions = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> transaction.Transaction: + r"""Begins a new transaction. This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new + transaction as a side-effect. + + Args: + request (:class:`~.spanner.BeginTransactionRequest`): + The request object. The request for + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + session (:class:`str`): + Required. The session in which the + transaction runs. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + options (:class:`~.transaction.TransactionOptions`): + Required. Options for the new + transaction. + This corresponds to the ``options`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.transaction.Transaction: + A transaction. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, options]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.BeginTransactionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.BeginTransactionRequest): + request = spanner.BeginTransactionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
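+
+        # Pairing BeginTransaction with a later request via a
+        # TransactionSelector; a sketch, with ``client`` and ``session_name``
+        # assumed and ``SELECT 1`` as a placeholder statement:
+        #
+        #     txn = client.begin_transaction(
+        #         session=session_name,
+        #         options=transaction.TransactionOptions(
+        #             read_write=transaction.TransactionOptions.ReadWrite()
+        #         ),
+        #     )
+        #     result = client.execute_sql(
+        #         request=spanner.ExecuteSqlRequest(
+        #             session=session_name,
+        #             sql="SELECT 1",
+        #             transaction=transaction.TransactionSelector(id=txn.id),
+        #         )
+        #     )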
+ + if session is not None: + request.session = session + if options is not None: + request.options = options + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.begin_transaction] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def commit( + self, + request: spanner.CommitRequest = None, + *, + session: str = None, + transaction_id: bytes = None, + mutations: Sequence[mutation.Mutation] = None, + single_use_transaction: transaction.TransactionOptions = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.CommitResponse: + r"""Commits a transaction. The request includes the mutations to be + applied to rows in the database. + + ``Commit`` might return an ``ABORTED`` error. This can occur at + any time; commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If ``Commit`` returns ``ABORTED``, the caller should + re-attempt the transaction from the beginning, re-using the same + session. + + Args: + request (:class:`~.spanner.CommitRequest`): + The request object. The request for + [Commit][google.spanner.v1.Spanner.Commit]. + session (:class:`str`): + Required. The session in which the + transaction to be committed is running. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + transaction_id (:class:`bytes`): + Commit a previously-started + transaction. + This corresponds to the ``transaction_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (:class:`Sequence[~.mutation.Mutation]`): + The mutations to be executed when + this transaction commits. All mutations + are applied atomically, in the order + they appear in this list. + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + single_use_transaction (:class:`~.transaction.TransactionOptions`): + Execute mutations in a temporary transaction. Note that + unlike commit of a previously-started transaction, + commit with a temporary transaction is non-idempotent. + That is, if the ``CommitRequest`` is sent to Cloud + Spanner more than once (for instance, due to retries in + the application, or in the transport library), it is + possible that the mutations are executed more than once. + If this is undesirable, use + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] + and [Commit][google.spanner.v1.Spanner.Commit] instead. + This corresponds to the ``single_use_transaction`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.CommitResponse: + The response for + [Commit][google.spanner.v1.Spanner.Commit]. 
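+
+        Example:
+            A minimal single-use commit. Illustrative sketch only;
+            ``client``, ``session_name`` and ``mutations`` are assumed to
+            exist::
+
+                response = client.commit(
+                    session=session_name,
+                    mutations=mutations,
+                    single_use_transaction=transaction.TransactionOptions(
+                        read_write=transaction.TransactionOptions.ReadWrite()
+                    ),
+                )
+                commit_time = response.commit_timestamp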
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [session, transaction_id, mutations, single_use_transaction] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.CommitRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.CommitRequest): + request = spanner.CommitRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if session is not None: + request.session = session + if transaction_id is not None: + request.transaction_id = transaction_id + if single_use_transaction is not None: + request.single_use_transaction = single_use_transaction + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.commit] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def rollback( + self, + request: spanner.RollbackRequest = None, + *, + session: str = None, + transaction_id: bytes = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Rolls back a transaction, releasing any locks it holds. It is a + good idea to call this for any transaction that includes one or + more [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + ultimately decides not to commit. + + ``Rollback`` returns ``OK`` if it successfully aborts the + transaction, the transaction was already aborted, or the + transaction is not found. ``Rollback`` never returns + ``ABORTED``. + + Args: + request (:class:`~.spanner.RollbackRequest`): + The request object. The request for + [Rollback][google.spanner.v1.Spanner.Rollback]. + session (:class:`str`): + Required. The session in which the + transaction to roll back is running. + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + transaction_id (:class:`bytes`): + Required. The transaction to roll + back. + This corresponds to the ``transaction_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([session, transaction_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.RollbackRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.RollbackRequest): + request = spanner.RollbackRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if session is not None: + request.session = session + if transaction_id is not None: + request.transaction_id = transaction_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.rollback] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def partition_query( + self, + request: spanner.PartitionQueryRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.PartitionResponse: + r"""Creates a set of partition tokens that can be used to execute a + query operation in parallel. Each of the returned partition + tokens can be used by + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + to specify a subset of the query result to read. The same + session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and + the ExecuteSqlRequests that use the partition tokens. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the query, and the whole operation must be restarted + from the beginning. + + Args: + request (:class:`~.spanner.PartitionQueryRequest`): + The request object. The request for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.PartitionResponse: + The response for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + or + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.PartitionQueryRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.PartitionQueryRequest): + request = spanner.PartitionQueryRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partition_query] + + # Certain fields should be provided within the metadata header; + # add these here. 
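+        # (These routing values are transmitted to the backend as the
+        # standard ``x-goog-request-params`` request header.)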
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def partition_read( + self, + request: spanner.PartitionReadRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> spanner.PartitionResponse: + r"""Creates a set of partition tokens that can be used to execute a + read operation in parallel. Each of the returned partition + tokens can be used by + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to + specify a subset of the read result to read. The same session + and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no + ordering guarantees on rows returned among the returned + partition tokens, or even within each individual StreamingRead + call issued with a partition_token. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the read, and the whole operation must be restarted + from the beginning. + + Args: + request (:class:`~.spanner.PartitionReadRequest`): + The request object. The request for + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.PartitionResponse: + The response for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + or + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.PartitionReadRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.PartitionReadRequest): + request = spanner.PartitionReadRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partition_read] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-spanner",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpannerClient",) diff --git a/google/cloud/spanner_v1/services/spanner/pagers.py b/google/cloud/spanner_v1/services/spanner/pagers.py new file mode 100644 index 0000000000..aff1cf533e --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/pagers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.spanner_v1.types import spanner + + +class ListSessionsPager: + """A pager for iterating through ``list_sessions`` requests. + + This class thinly wraps an initial + :class:`~.spanner.ListSessionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``sessions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSessions`` requests and continue to iterate + through the ``sessions`` field on the + corresponding responses. + + All the usual :class:`~.spanner.ListSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., spanner.ListSessionsResponse], + request: spanner.ListSessionsRequest, + response: spanner.ListSessionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner.ListSessionsRequest`): + The initial request object. + response (:class:`~.spanner.ListSessionsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner.ListSessionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[spanner.ListSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[spanner.Session]: + for page in self.pages: + yield from page.sessions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSessionsAsyncPager: + """A pager for iterating through ``list_sessions`` requests. 
+ + This class thinly wraps an initial + :class:`~.spanner.ListSessionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``sessions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSessions`` requests and continue to iterate + through the ``sessions`` field on the + corresponding responses. + + All the usual :class:`~.spanner.ListSessionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[spanner.ListSessionsResponse]], + request: spanner.ListSessionsRequest, + response: spanner.ListSessionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.spanner.ListSessionsRequest`): + The initial request object. + response (:class:`~.spanner.ListSessionsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = spanner.ListSessionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[spanner.ListSessionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[spanner.Session]: + async def async_generator(): + async for page in self.pages: + for response in page.sessions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/spanner_v1/services/spanner/transports/__init__.py b/google/cloud/spanner_v1/services/spanner/transports/__init__.py new file mode 100644 index 0000000000..1bf46eb475 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpannerTransport +from .grpc import SpannerGrpcTransport +from .grpc_asyncio import SpannerGrpcAsyncIOTransport + + +# Compile a registry of transports. 
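+# The registry keys are the names a caller may pass as the client's
+# ``transport`` argument (for example ``SpannerClient(transport="grpc")``).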
+_transport_registry = OrderedDict() # type: Dict[str, Type[SpannerTransport]] +_transport_registry["grpc"] = SpannerGrpcTransport +_transport_registry["grpc_asyncio"] = SpannerGrpcAsyncIOTransport + + +__all__ = ( + "SpannerTransport", + "SpannerGrpcTransport", + "SpannerGrpcAsyncIOTransport", +) diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py new file mode 100644 index 0000000000..36e3c0cb52 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.spanner_v1.types import result_set +from google.cloud.spanner_v1.types import spanner +from google.cloud.spanner_v1.types import transaction +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-spanner",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class SpannerTransport(abc.ABC): + """Abstract transport class for Spanner.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ) + + def __init__( + self, + *, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library.
+ """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_session: gapic_v1.method.wrap_method( + self.create_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.batch_create_sessions: gapic_v1.method.wrap_method( + self.batch_create_sessions, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_session: gapic_v1.method.wrap_method( + self.get_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.list_sessions: gapic_v1.method.wrap_method( + self.list_sessions, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.delete_session: gapic_v1.method.wrap_method( + self.delete_session, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.execute_sql: gapic_v1.method.wrap_method( + self.execute_sql, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.execute_streaming_sql: gapic_v1.method.wrap_method( + self.execute_streaming_sql, + default_timeout=3600.0, + client_info=client_info, + ), + self.execute_batch_dml: gapic_v1.method.wrap_method( + self.execute_batch_dml, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.read: gapic_v1.method.wrap_method( + self.read, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.streaming_read: gapic_v1.method.wrap_method( + self.streaming_read, default_timeout=3600.0, client_info=client_info, + ), + self.begin_transaction: 
gapic_v1.method.wrap_method( + self.begin_transaction, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.commit: gapic_v1.method.wrap_method( + self.commit, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.rollback: gapic_v1.method.wrap_method( + self.rollback, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.partition_query: gapic_v1.method.wrap_method( + self.partition_query, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + self.partition_read: gapic_v1.method.wrap_method( + self.partition_read, + default_retry=retries.Retry( + initial=0.25, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=30.0, + client_info=client_info, + ), + } + + @property + def create_session( + self, + ) -> typing.Callable[ + [spanner.CreateSessionRequest], + typing.Union[spanner.Session, typing.Awaitable[spanner.Session]], + ]: + raise NotImplementedError() + + @property + def batch_create_sessions( + self, + ) -> typing.Callable[ + [spanner.BatchCreateSessionsRequest], + typing.Union[ + spanner.BatchCreateSessionsResponse, + typing.Awaitable[spanner.BatchCreateSessionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_session( + self, + ) -> typing.Callable[ + [spanner.GetSessionRequest], + typing.Union[spanner.Session, typing.Awaitable[spanner.Session]], + ]: + raise NotImplementedError() + + @property + def list_sessions( + self, + ) -> typing.Callable[ + [spanner.ListSessionsRequest], + typing.Union[ + spanner.ListSessionsResponse, typing.Awaitable[spanner.ListSessionsResponse] + ], + ]: + raise NotImplementedError() + + @property + def delete_session( + self, + ) -> typing.Callable[ + [spanner.DeleteSessionRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def execute_sql( + self, + ) -> typing.Callable[ + [spanner.ExecuteSqlRequest], + typing.Union[result_set.ResultSet, typing.Awaitable[result_set.ResultSet]], + ]: + raise NotImplementedError() + + @property + def execute_streaming_sql( + self, + ) -> typing.Callable[ + [spanner.ExecuteSqlRequest], + typing.Union[ + result_set.PartialResultSet, typing.Awaitable[result_set.PartialResultSet] + ], + ]: + raise NotImplementedError() + + @property + def execute_batch_dml( + self, + ) -> typing.Callable[ + [spanner.ExecuteBatchDmlRequest], + typing.Union[ + spanner.ExecuteBatchDmlResponse, + typing.Awaitable[spanner.ExecuteBatchDmlResponse], + ], + ]: + raise NotImplementedError() + + @property + def read( + self, + ) -> typing.Callable[ + [spanner.ReadRequest], + typing.Union[result_set.ResultSet, typing.Awaitable[result_set.ResultSet]], + ]: + raise NotImplementedError() + + @property + def streaming_read( + self, + ) -> typing.Callable[ + [spanner.ReadRequest], + typing.Union[ + result_set.PartialResultSet, 
typing.Awaitable[result_set.PartialResultSet] + ], + ]: + raise NotImplementedError() + + @property + def begin_transaction( + self, + ) -> typing.Callable[ + [spanner.BeginTransactionRequest], + typing.Union[ + transaction.Transaction, typing.Awaitable[transaction.Transaction] + ], + ]: + raise NotImplementedError() + + @property + def commit( + self, + ) -> typing.Callable[ + [spanner.CommitRequest], + typing.Union[spanner.CommitResponse, typing.Awaitable[spanner.CommitResponse]], + ]: + raise NotImplementedError() + + @property + def rollback( + self, + ) -> typing.Callable[ + [spanner.RollbackRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def partition_query( + self, + ) -> typing.Callable[ + [spanner.PartitionQueryRequest], + typing.Union[ + spanner.PartitionResponse, typing.Awaitable[spanner.PartitionResponse] + ], + ]: + raise NotImplementedError() + + @property + def partition_read( + self, + ) -> typing.Callable[ + [spanner.PartitionReadRequest], + typing.Union[ + spanner.PartitionResponse, typing.Awaitable[spanner.PartitionResponse] + ], + ]: + raise NotImplementedError() + + +__all__ = ("SpannerTransport",) diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py new file mode 100644 index 0000000000..620a971775 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -0,0 +1,741 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.spanner_v1.types import result_set +from google.cloud.spanner_v1.types import spanner +from google.cloud.spanner_v1.types import transaction +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import SpannerTransport, DEFAULT_CLIENT_INFO + + +class SpannerGrpcTransport(SpannerTransport): + """gRPC backend transport for Spanner. + + Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
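+
+    Example:
+        A construction sketch; wiring a transport explicitly is optional
+        (the client builds a gRPC transport by default), credentials are
+        resolved from the environment, and the import paths follow the
+        module layout added in this change::
+
+            from google.cloud.spanner_v1.services.spanner import SpannerClient
+            from google.cloud.spanner_v1.services.spanner.transports import (
+                SpannerGrpcTransport,
+            )
+
+            transport = SpannerGrpcTransport(host="spanner.googleapis.com")
+            client = SpannerClient(transport=transport)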
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def create_session( + self, + ) -> Callable[[spanner.CreateSessionRequest], spanner.Session]: + r"""Return a callable for the create session method over gRPC. + + Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner + database. Sessions are meant to be reused for many consecutive + transactions. + + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good + idea to delete idle and unneeded sessions.
Aside from explicit + deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is + deleted, requests to it return ``NOT_FOUND``. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., ``"SELECT 1"``. + + Returns: + Callable[[~.CreateSessionRequest], + ~.Session]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_session" not in self._stubs: + self._stubs["create_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/CreateSession", + request_serializer=spanner.CreateSessionRequest.serialize, + response_deserializer=spanner.Session.deserialize, + ) + return self._stubs["create_session"] + + @property + def batch_create_sessions( + self, + ) -> Callable[ + [spanner.BatchCreateSessionsRequest], spanner.BatchCreateSessionsResponse + ]: + r"""Return a callable for the batch create sessions method over gRPC. + + Creates multiple new sessions. + This API can be used to initialize a session cache on + the clients. See https://goo.gl/TgSFN2 for best + practices on session cache management. + + Returns: + Callable[[~.BatchCreateSessionsRequest], + ~.BatchCreateSessionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_sessions" not in self._stubs: + self._stubs["batch_create_sessions"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/BatchCreateSessions", + request_serializer=spanner.BatchCreateSessionsRequest.serialize, + response_deserializer=spanner.BatchCreateSessionsResponse.deserialize, + ) + return self._stubs["batch_create_sessions"] + + @property + def get_session(self) -> Callable[[spanner.GetSessionRequest], spanner.Session]: + r"""Return a callable for the get session method over gRPC. + + Gets a session. Returns ``NOT_FOUND`` if the session does not + exist. This is mainly useful for determining whether a session + is still alive. + + Returns: + Callable[[~.GetSessionRequest], + ~.Session]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_session" not in self._stubs: + self._stubs["get_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/GetSession", + request_serializer=spanner.GetSessionRequest.serialize, + response_deserializer=spanner.Session.deserialize, + ) + return self._stubs["get_session"] + + @property + def list_sessions( + self, + ) -> Callable[[spanner.ListSessionsRequest], spanner.ListSessionsResponse]: + r"""Return a callable for the list sessions method over gRPC. + + Lists all sessions in a given database. + + Returns: + Callable[[~.ListSessionsRequest], + ~.ListSessionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_sessions" not in self._stubs: + self._stubs["list_sessions"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ListSessions", + request_serializer=spanner.ListSessionsRequest.serialize, + response_deserializer=spanner.ListSessionsResponse.deserialize, + ) + return self._stubs["list_sessions"] + + @property + def delete_session(self) -> Callable[[spanner.DeleteSessionRequest], empty.Empty]: + r"""Return a callable for the delete session method over gRPC. + + Ends a session, releasing server resources associated + with it. This will asynchronously trigger cancellation + of any operations that are running with this session. + + Returns: + Callable[[~.DeleteSessionRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_session" not in self._stubs: + self._stubs["delete_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/DeleteSession", + request_serializer=spanner.DeleteSessionRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_session"] + + @property + def execute_sql( + self, + ) -> Callable[[spanner.ExecuteSqlRequest], result_set.ResultSet]: + r"""Return a callable for the execute sql method over gRPC. + + Executes an SQL statement, returning all results in a single + reply. This method cannot be used to return a result set larger + than 10 MiB; if the query yields more data than that, the query + fails with a ``FAILED_PRECONDITION`` error. + + Operations inside read-write transactions might return + ``ABORTED``. If this occurs, the application should restart the + transaction from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be fetched in streaming fashion by + calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + + Returns: + Callable[[~.ExecuteSqlRequest], + ~.ResultSet]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_sql" not in self._stubs: + self._stubs["execute_sql"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ExecuteSql", + request_serializer=spanner.ExecuteSqlRequest.serialize, + response_deserializer=result_set.ResultSet.deserialize, + ) + return self._stubs["execute_sql"] + + @property + def execute_streaming_sql( + self, + ) -> Callable[[spanner.ExecuteSqlRequest], result_set.PartialResultSet]: + r"""Return a callable for the execute streaming sql method over gRPC. + + Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except + returns the result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no + limit on the size of the returned result set. However, no + individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. + + Returns: + Callable[[~.ExecuteSqlRequest], + ~.PartialResultSet]: + A function that, when called, will call the underlying RPC + on the server. 
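+
+        Example:
+            A consumption sketch; ``transport`` and ``request`` are assumed
+            to already exist, and ``handle`` is a placeholder for
+            application code::
+
+                for partial_result_set in transport.execute_streaming_sql(request):
+                    handle(partial_result_set.values)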
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_streaming_sql" not in self._stubs: + self._stubs["execute_streaming_sql"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + request_serializer=spanner.ExecuteSqlRequest.serialize, + response_deserializer=result_set.PartialResultSet.deserialize, + ) + return self._stubs["execute_streaming_sql"] + + @property + def execute_batch_dml( + self, + ) -> Callable[[spanner.ExecuteBatchDmlRequest], spanner.ExecuteBatchDmlResponse]: + r"""Return a callable for the execute batch dml method over gRPC. + + Executes a batch of SQL DML statements. This method allows many + statements to be run with lower latency than submitting them + sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can + succeed even if a statement fails. The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement + that failed. Clients must inspect this field to determine + whether an error occurred. + + Execution stops after the first failed statement; the remaining + statements are not executed. + + Returns: + Callable[[~.ExecuteBatchDmlRequest], + ~.ExecuteBatchDmlResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_batch_dml" not in self._stubs: + self._stubs["execute_batch_dml"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ExecuteBatchDml", + request_serializer=spanner.ExecuteBatchDmlRequest.serialize, + response_deserializer=spanner.ExecuteBatchDmlResponse.deserialize, + ) + return self._stubs["execute_batch_dml"] + + @property + def read(self) -> Callable[[spanner.ReadRequest], result_set.ResultSet]: + r"""Return a callable for the read method over gRPC. + + Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method + cannot be used to return a result set larger than 10 MiB; if the + read matches more data than that, the read fails with a + ``FAILED_PRECONDITION`` error. + + Reads inside read-write transactions might return ``ABORTED``. + If this occurs, the application should restart the transaction + from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be yielded in streaming fashion by + calling [StreamingRead][google.spanner.v1.Spanner.StreamingRead] + instead. + + Returns: + Callable[[~.ReadRequest], + ~.ResultSet]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "read" not in self._stubs: + self._stubs["read"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Read", + request_serializer=spanner.ReadRequest.serialize, + response_deserializer=result_set.ResultSet.deserialize, + ) + return self._stubs["read"] + + @property + def streaming_read( + self, + ) -> Callable[[spanner.ReadRequest], result_set.PartialResultSet]: + r"""Return a callable for the streaming read method over gRPC. + + Like [Read][google.spanner.v1.Spanner.Read], except returns the + result set as a stream. Unlike + [Read][google.spanner.v1.Spanner.Read], there is no limit on the + size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can + exceed 10 MiB. + + Returns: + Callable[[~.ReadRequest], + ~.PartialResultSet]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_read" not in self._stubs: + self._stubs["streaming_read"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/StreamingRead", + request_serializer=spanner.ReadRequest.serialize, + response_deserializer=result_set.PartialResultSet.deserialize, + ) + return self._stubs["streaming_read"] + + @property + def begin_transaction( + self, + ) -> Callable[[spanner.BeginTransactionRequest], transaction.Transaction]: + r"""Return a callable for the begin transaction method over gRPC. + + Begins a new transaction. This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new + transaction as a side-effect. + + Returns: + Callable[[~.BeginTransactionRequest], + ~.Transaction]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "begin_transaction" not in self._stubs: + self._stubs["begin_transaction"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/BeginTransaction", + request_serializer=spanner.BeginTransactionRequest.serialize, + response_deserializer=transaction.Transaction.deserialize, + ) + return self._stubs["begin_transaction"] + + @property + def commit(self) -> Callable[[spanner.CommitRequest], spanner.CommitResponse]: + r"""Return a callable for the commit method over gRPC. + + Commits a transaction. The request includes the mutations to be + applied to rows in the database. + + ``Commit`` might return an ``ABORTED`` error. This can occur at + any time; commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If ``Commit`` returns ``ABORTED``, the caller should + re-attempt the transaction from the beginning, re-using the same + session. + + Returns: + Callable[[~.CommitRequest], + ~.CommitResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "commit" not in self._stubs: + self._stubs["commit"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Commit", + request_serializer=spanner.CommitRequest.serialize, + response_deserializer=spanner.CommitResponse.deserialize, + ) + return self._stubs["commit"] + + @property + def rollback(self) -> Callable[[spanner.RollbackRequest], empty.Empty]: + r"""Return a callable for the rollback method over gRPC. + + Rolls back a transaction, releasing any locks it holds. It is a + good idea to call this for any transaction that includes one or + more [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + ultimately decides not to commit. + + ``Rollback`` returns ``OK`` if it successfully aborts the + transaction, the transaction was already aborted, or the + transaction is not found. ``Rollback`` never returns + ``ABORTED``. + + Returns: + Callable[[~.RollbackRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "rollback" not in self._stubs: + self._stubs["rollback"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Rollback", + request_serializer=spanner.RollbackRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["rollback"] + + @property + def partition_query( + self, + ) -> Callable[[spanner.PartitionQueryRequest], spanner.PartitionResponse]: + r"""Return a callable for the partition query method over gRPC. + + Creates a set of partition tokens that can be used to execute a + query operation in parallel. Each of the returned partition + tokens can be used by + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + to specify a subset of the query result to read. The same + session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and + the ExecuteSqlRequests that use the partition tokens. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the query, and the whole operation must be restarted + from the beginning. + + Returns: + Callable[[~.PartitionQueryRequest], + ~.PartitionResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partition_query" not in self._stubs: + self._stubs["partition_query"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/PartitionQuery", + request_serializer=spanner.PartitionQueryRequest.serialize, + response_deserializer=spanner.PartitionResponse.deserialize, + ) + return self._stubs["partition_query"] + + @property + def partition_read( + self, + ) -> Callable[[spanner.PartitionReadRequest], spanner.PartitionResponse]: + r"""Return a callable for the partition read method over gRPC. + + Creates a set of partition tokens that can be used to execute a + read operation in parallel. 
Each of the returned partition + tokens can be used by + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to + specify a subset of the read result to read. The same session + and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no + ordering guarantees on rows returned among the returned + partition tokens, or even within each individual StreamingRead + call issued with a partition_token. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the read, and the whole operation must be restarted + from the beginning. + + Returns: + Callable[[~.PartitionReadRequest], + ~.PartitionResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partition_read" not in self._stubs: + self._stubs["partition_read"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/PartitionRead", + request_serializer=spanner.PartitionReadRequest.serialize, + response_deserializer=spanner.PartitionResponse.deserialize, + ) + return self._stubs["partition_read"] + + +__all__ = ("SpannerGrpcTransport",) diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py new file mode 100644 index 0000000000..79ab4a1f94 --- /dev/null +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -0,0 +1,760 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.spanner_v1.types import result_set +from google.cloud.spanner_v1.types import spanner +from google.cloud.spanner_v1.types import transaction +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import SpannerTransport, DEFAULT_CLIENT_INFO +from .grpc import SpannerGrpcTransport + + +class SpannerGrpcAsyncIOTransport(SpannerTransport): + """gRPC AsyncIO backend transport for Spanner. + + Cloud Spanner API + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "spanner.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_session( + self, + ) -> Callable[[spanner.CreateSessionRequest], Awaitable[spanner.Session]]: + r"""Return a callable for the create session method over gRPC. + + Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner + database. Sessions are meant to be reused for many consecutive + transactions. 
+ + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good + idea to delete idle and unneeded sessions. Aside from explicit + deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is + deleted, requests to it return ``NOT_FOUND``. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., ``"SELECT 1"``. + + Returns: + Callable[[~.CreateSessionRequest], + Awaitable[~.Session]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_session" not in self._stubs: + self._stubs["create_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/CreateSession", + request_serializer=spanner.CreateSessionRequest.serialize, + response_deserializer=spanner.Session.deserialize, + ) + return self._stubs["create_session"] + + @property + def batch_create_sessions( + self, + ) -> Callable[ + [spanner.BatchCreateSessionsRequest], + Awaitable[spanner.BatchCreateSessionsResponse], + ]: + r"""Return a callable for the batch create sessions method over gRPC. + + Creates multiple new sessions. + This API can be used to initialize a session cache on + the clients. See https://goo.gl/TgSFN2 for best + practices on session cache management. + + Returns: + Callable[[~.BatchCreateSessionsRequest], + Awaitable[~.BatchCreateSessionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "batch_create_sessions" not in self._stubs: + self._stubs["batch_create_sessions"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/BatchCreateSessions", + request_serializer=spanner.BatchCreateSessionsRequest.serialize, + response_deserializer=spanner.BatchCreateSessionsResponse.deserialize, + ) + return self._stubs["batch_create_sessions"] + + @property + def get_session( + self, + ) -> Callable[[spanner.GetSessionRequest], Awaitable[spanner.Session]]: + r"""Return a callable for the get session method over gRPC. + + Gets a session. Returns ``NOT_FOUND`` if the session does not + exist. This is mainly useful for determining whether a session + is still alive. + + Returns: + Callable[[~.GetSessionRequest], + Awaitable[~.Session]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
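+        # The wrapped stub is memoized in self._stubs, so repeated property
+        # accesses reuse the same bound RPC method instead of re-registering
+        # it on the channel.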
+ if "get_session" not in self._stubs: + self._stubs["get_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/GetSession", + request_serializer=spanner.GetSessionRequest.serialize, + response_deserializer=spanner.Session.deserialize, + ) + return self._stubs["get_session"] + + @property + def list_sessions( + self, + ) -> Callable[ + [spanner.ListSessionsRequest], Awaitable[spanner.ListSessionsResponse] + ]: + r"""Return a callable for the list sessions method over gRPC. + + Lists all sessions in a given database. + + Returns: + Callable[[~.ListSessionsRequest], + Awaitable[~.ListSessionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_sessions" not in self._stubs: + self._stubs["list_sessions"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ListSessions", + request_serializer=spanner.ListSessionsRequest.serialize, + response_deserializer=spanner.ListSessionsResponse.deserialize, + ) + return self._stubs["list_sessions"] + + @property + def delete_session( + self, + ) -> Callable[[spanner.DeleteSessionRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete session method over gRPC. + + Ends a session, releasing server resources associated + with it. This will asynchronously trigger cancellation + of any operations that are running with this session. + + Returns: + Callable[[~.DeleteSessionRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_session" not in self._stubs: + self._stubs["delete_session"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/DeleteSession", + request_serializer=spanner.DeleteSessionRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_session"] + + @property + def execute_sql( + self, + ) -> Callable[[spanner.ExecuteSqlRequest], Awaitable[result_set.ResultSet]]: + r"""Return a callable for the execute sql method over gRPC. + + Executes an SQL statement, returning all results in a single + reply. This method cannot be used to return a result set larger + than 10 MiB; if the query yields more data than that, the query + fails with a ``FAILED_PRECONDITION`` error. + + Operations inside read-write transactions might return + ``ABORTED``. If this occurs, the application should restart the + transaction from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be fetched in streaming fashion by + calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + + Returns: + Callable[[~.ExecuteSqlRequest], + Awaitable[~.ResultSet]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "execute_sql" not in self._stubs: + self._stubs["execute_sql"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ExecuteSql", + request_serializer=spanner.ExecuteSqlRequest.serialize, + response_deserializer=result_set.ResultSet.deserialize, + ) + return self._stubs["execute_sql"] + + @property + def execute_streaming_sql( + self, + ) -> Callable[[spanner.ExecuteSqlRequest], Awaitable[result_set.PartialResultSet]]: + r"""Return a callable for the execute streaming sql method over gRPC. + + Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except + returns the result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no + limit on the size of the returned result set. However, no + individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. + + Returns: + Callable[[~.ExecuteSqlRequest], + Awaitable[~.PartialResultSet]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_streaming_sql" not in self._stubs: + self._stubs["execute_streaming_sql"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + request_serializer=spanner.ExecuteSqlRequest.serialize, + response_deserializer=result_set.PartialResultSet.deserialize, + ) + return self._stubs["execute_streaming_sql"] + + @property + def execute_batch_dml( + self, + ) -> Callable[ + [spanner.ExecuteBatchDmlRequest], Awaitable[spanner.ExecuteBatchDmlResponse] + ]: + r"""Return a callable for the execute batch dml method over gRPC. + + Executes a batch of SQL DML statements. This method allows many + statements to be run with lower latency than submitting them + sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can + succeed even if a statement fails. The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement + that failed. Clients must inspect this field to determine + whether an error occurred. + + Execution stops after the first failed statement; the remaining + statements are not executed. + + Returns: + Callable[[~.ExecuteBatchDmlRequest], + Awaitable[~.ExecuteBatchDmlResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_batch_dml" not in self._stubs: + self._stubs["execute_batch_dml"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/ExecuteBatchDml", + request_serializer=spanner.ExecuteBatchDmlRequest.serialize, + response_deserializer=spanner.ExecuteBatchDmlResponse.deserialize, + ) + return self._stubs["execute_batch_dml"] + + @property + def read(self) -> Callable[[spanner.ReadRequest], Awaitable[result_set.ResultSet]]: + r"""Return a callable for the read method over gRPC. + + Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. 
This method + cannot be used to return a result set larger than 10 MiB; if the + read matches more data than that, the read fails with a + ``FAILED_PRECONDITION`` error. + + Reads inside read-write transactions might return ``ABORTED``. + If this occurs, the application should restart the transaction + from the beginning. See + [Transaction][google.spanner.v1.Transaction] for more details. + + Larger result sets can be yielded in streaming fashion by + calling [StreamingRead][google.spanner.v1.Spanner.StreamingRead] + instead. + + Returns: + Callable[[~.ReadRequest], + Awaitable[~.ResultSet]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read" not in self._stubs: + self._stubs["read"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Read", + request_serializer=spanner.ReadRequest.serialize, + response_deserializer=result_set.ResultSet.deserialize, + ) + return self._stubs["read"] + + @property + def streaming_read( + self, + ) -> Callable[[spanner.ReadRequest], Awaitable[result_set.PartialResultSet]]: + r"""Return a callable for the streaming read method over gRPC. + + Like [Read][google.spanner.v1.Spanner.Read], except returns the + result set as a stream. Unlike + [Read][google.spanner.v1.Spanner.Read], there is no limit on the + size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can + exceed 10 MiB. + + Returns: + Callable[[~.ReadRequest], + Awaitable[~.PartialResultSet]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_read" not in self._stubs: + self._stubs["streaming_read"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/StreamingRead", + request_serializer=spanner.ReadRequest.serialize, + response_deserializer=result_set.PartialResultSet.deserialize, + ) + return self._stubs["streaming_read"] + + @property + def begin_transaction( + self, + ) -> Callable[ + [spanner.BeginTransactionRequest], Awaitable[transaction.Transaction] + ]: + r"""Return a callable for the begin transaction method over gRPC. + + Begins a new transaction. This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new + transaction as a side-effect. + + Returns: + Callable[[~.BeginTransactionRequest], + Awaitable[~.Transaction]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
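+        # BeginTransaction is memoized like the other stubs; the returned
+        # Transaction proto carries the transaction ID that subsequent
+        # reads, queries, and the eventual Commit or Rollback reference.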
+ if "begin_transaction" not in self._stubs: + self._stubs["begin_transaction"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/BeginTransaction", + request_serializer=spanner.BeginTransactionRequest.serialize, + response_deserializer=transaction.Transaction.deserialize, + ) + return self._stubs["begin_transaction"] + + @property + def commit( + self, + ) -> Callable[[spanner.CommitRequest], Awaitable[spanner.CommitResponse]]: + r"""Return a callable for the commit method over gRPC. + + Commits a transaction. The request includes the mutations to be + applied to rows in the database. + + ``Commit`` might return an ``ABORTED`` error. This can occur at + any time; commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If ``Commit`` returns ``ABORTED``, the caller should + re-attempt the transaction from the beginning, re-using the same + session. + + Returns: + Callable[[~.CommitRequest], + Awaitable[~.CommitResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "commit" not in self._stubs: + self._stubs["commit"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Commit", + request_serializer=spanner.CommitRequest.serialize, + response_deserializer=spanner.CommitResponse.deserialize, + ) + return self._stubs["commit"] + + @property + def rollback(self) -> Callable[[spanner.RollbackRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the rollback method over gRPC. + + Rolls back a transaction, releasing any locks it holds. It is a + good idea to call this for any transaction that includes one or + more [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + ultimately decides not to commit. + + ``Rollback`` returns ``OK`` if it successfully aborts the + transaction, the transaction was already aborted, or the + transaction is not found. ``Rollback`` never returns + ``ABORTED``. + + Returns: + Callable[[~.RollbackRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "rollback" not in self._stubs: + self._stubs["rollback"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/Rollback", + request_serializer=spanner.RollbackRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["rollback"] + + @property + def partition_query( + self, + ) -> Callable[ + [spanner.PartitionQueryRequest], Awaitable[spanner.PartitionResponse] + ]: + r"""Return a callable for the partition query method over gRPC. + + Creates a set of partition tokens that can be used to execute a + query operation in parallel. Each of the returned partition + tokens can be used by + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + to specify a subset of the query result to read. The same + session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and + the ExecuteSqlRequests that use the partition tokens. 
+ + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the query, and the whole operation must be restarted + from the beginning. + + Returns: + Callable[[~.PartitionQueryRequest], + Awaitable[~.PartitionResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partition_query" not in self._stubs: + self._stubs["partition_query"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/PartitionQuery", + request_serializer=spanner.PartitionQueryRequest.serialize, + response_deserializer=spanner.PartitionResponse.deserialize, + ) + return self._stubs["partition_query"] + + @property + def partition_read( + self, + ) -> Callable[[spanner.PartitionReadRequest], Awaitable[spanner.PartitionResponse]]: + r"""Return a callable for the partition read method over gRPC. + + Creates a set of partition tokens that can be used to execute a + read operation in parallel. Each of the returned partition + tokens can be used by + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to + specify a subset of the read result to read. The same session + and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no + ordering guarantees on rows returned among the returned + partition tokens, or even within each individual StreamingRead + call issued with a partition_token. + + Partition tokens become invalid when the session used to create + them is deleted, is idle for too long, begins a new transaction, + or becomes too old. When any of these happen, it is not possible + to resume the read, and the whole operation must be restarted + from the beginning. + + Returns: + Callable[[~.PartitionReadRequest], + Awaitable[~.PartitionResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "partition_read" not in self._stubs: + self._stubs["partition_read"] = self.grpc_channel.unary_unary( + "/google.spanner.v1.Spanner/PartitionRead", + request_serializer=spanner.PartitionReadRequest.serialize, + response_deserializer=spanner.PartitionResponse.deserialize, + ) + return self._stubs["partition_read"] + + +__all__ = ("SpannerGrpcAsyncIOTransport",) diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index b3a1b7e6d8..8b33221cf9 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -15,19 +15,23 @@ """Wrapper for Cloud Spanner Session objects.""" from functools import total_ordering +import random import time from google.rpc.error_details_pb2 import RetryInfo # pylint: disable=ungrouped-imports -from google.api_core.exceptions import Aborted, GoogleAPICallError, NotFound +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import GoogleAPICallError +from google.api_core.exceptions import NotFound import google.api_core.gapic_v1.method from google.cloud.spanner_v1._helpers import _metadata_with_prefix +from google.cloud.spanner_v1._opentelemetry_tracing import trace_call from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.snapshot import Snapshot from google.cloud.spanner_v1.transaction import Transaction -from google.cloud.spanner_v1._opentelemetry_tracing import trace_call -import random +from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_v1 import CreateSessionRequest # pylint: enable=ungrouped-imports @@ -112,14 +116,14 @@ def create(self): raise ValueError("Session ID already set by back-end") api = self._database.spanner_api metadata = _metadata_with_prefix(self._database.name) - kw = {} + + request = CreateSessionRequest(database=self._database.name) + if self._labels: - kw = {"session": {"labels": self._labels}} + request.session.labels = self._labels with trace_call("CloudSpanner.CreateSession", self, self._labels): - session_pb = api.create_session( - self._database.name, metadata=metadata, **kw - ) + session_pb = api.create_session(request=request, metadata=metadata,) self._session_id = session_pb.name.split("/")[-1] def exists(self): @@ -138,7 +142,7 @@ def exists(self): with trace_call("CloudSpanner.GetSession", self) as span: try: - api.get_session(self.name, metadata=metadata) + api.get_session(name=self.name, metadata=metadata) if span: span.set_attribute("session_found", True) except NotFound: @@ -162,7 +166,7 @@ def delete(self): api = self._database.spanner_api metadata = _metadata_with_prefix(self._database.name) with trace_call("CloudSpanner.DeleteSession", self): - api.delete_session(self.name, metadata=metadata) + api.delete_session(name=self.name, metadata=metadata) def ping(self): """Ping the session to keep it alive by executing "SELECT 1". @@ -173,7 +177,8 @@ def ping(self): raise ValueError("Session ID not set by back-end") api = self._database.spanner_api metadata = _metadata_with_prefix(self._database.name) - api.execute_sql(self.name, "SELECT 1", metadata=metadata) + request = ExecuteSqlRequest(session=self.name, sql="SELECT 1") + api.execute_sql(request=request, metadata=metadata) def snapshot(self, **kw): """Create a snapshot to perform a set of reads with shared staleness. @@ -249,7 +254,7 @@ def execute_sql( `QueryMode `_. 
:type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Options that are provided for query plan stability. diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 42e71545d4..d417bfd1f1 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -17,21 +17,23 @@ import functools from google.protobuf.struct_pb2 import Struct -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector +from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_v1 import ReadRequest +from google.cloud.spanner_v1 import TransactionOptions +from google.cloud.spanner_v1 import TransactionSelector +from google.cloud.spanner_v1 import PartitionOptions +from google.cloud.spanner_v1 import PartitionQueryRequest +from google.cloud.spanner_v1 import PartitionReadRequest from google.api_core.exceptions import InternalServerError from google.api_core.exceptions import ServiceUnavailable import google.api_core.gapic_v1.method -from google.cloud._helpers import _datetime_to_pb_timestamp -from google.cloud.spanner_v1._helpers import _merge_query_options -from google.cloud._helpers import _timedelta_to_duration_pb from google.cloud.spanner_v1._helpers import _make_value_pb +from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1._helpers import _SessionWrapper -from google.cloud.spanner_v1.streamed import StreamedResultSet -from google.cloud.spanner_v1.types import PartitionOptions from google.cloud.spanner_v1._opentelemetry_tracing import trace_call +from google.cloud.spanner_v1.streamed import StreamedResultSet _STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES = ( "RST_STREAM", @@ -150,17 +152,18 @@ def read(self, table, columns, keyset, index="", limit=0, partition=None): metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() - restart = functools.partial( - api.streaming_read, - self._session.name, - table, - columns, - keyset._to_pb(), + request = ReadRequest( + session=self._session.name, + table=table, + columns=columns, + key_set=keyset._to_pb(), transaction=transaction, index=index, limit=limit, partition_token=partition, - metadata=metadata, + ) + restart = functools.partial( + api.streaming_read, request=request, metadata=metadata, ) trace_attributes = {"table_id": table, "columns": columns} @@ -201,18 +204,18 @@ def execute_sql( required if parameters are passed. :type query_mode: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See: `QueryMode `_. :type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Query optimizer configuration to use for the given query. 
If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.QueryOptions` + message :class:`~google.cloud.spanner_v1.QueryOptions` :type partition: bytes :param partition: (Optional) one of the partition tokens returned @@ -238,7 +241,7 @@ def execute_sql( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: - params_pb = None + params_pb = {} database = self._session._database metadata = _metadata_with_prefix(database.name) @@ -250,10 +253,9 @@ def execute_sql( default_query_options = database._instance._client._query_options query_options = _merge_query_options(default_query_options, query_options) - restart = functools.partial( - api.execute_streaming_sql, - self._session.name, - sql, + request = ExecuteSqlRequest( + session=self._session.name, + sql=sql, transaction=transaction, params=params_pb, param_types=param_types, @@ -261,6 +263,10 @@ def execute_sql( partition_token=partition, seqno=self._execute_sql_count, query_options=query_options, + ) + restart = functools.partial( + api.execute_streaming_sql, + request=request, metadata=metadata, retry=retry, timeout=timeout, @@ -337,21 +343,21 @@ def partition_read( partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) + request = PartitionReadRequest( + session=self._session.name, + table=table, + columns=columns, + key_set=keyset._to_pb(), + transaction=transaction, + index=index, + partition_options=partition_options, + ) trace_attributes = {"table_id": table, "columns": columns} with trace_call( "CloudSpanner.PartitionReadOnlyTransaction", self._session, trace_attributes ): - response = api.partition_read( - session=self._session.name, - table=table, - columns=columns, - key_set=keyset._to_pb(), - transaction=transaction, - index=index, - partition_options=partition_options, - metadata=metadata, - ) + response = api.partition_read(request=request, metadata=metadata,) return [partition.partition_token for partition in response.partitions] @@ -405,10 +411,10 @@ def partition_query( if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} + fields={key: _make_value_pb(value) for (key, value) in params.items()} ) else: - params_pb = None + params_pb = Struct() database = self._session._database api = database.spanner_api @@ -417,6 +423,14 @@ def partition_query( partition_options = PartitionOptions( partition_size_bytes=partition_size_bytes, max_partitions=max_partitions ) + request = PartitionQueryRequest( + session=self._session.name, + sql=sql, + transaction=transaction, + params=params_pb, + param_types=param_types, + partition_options=partition_options, + ) trace_attributes = {"db.statement": sql} with trace_call( @@ -424,15 +438,7 @@ def partition_query( self._session, trace_attributes, ): - response = api.partition_query( - session=self._session.name, - sql=sql, - transaction=transaction, - params=params_pb, - param_types=param_types, - partition_options=partition_options, - metadata=metadata, - ) + response = api.partition_query(request=request, metadata=metadata,) return [partition.partition_token for partition in response.partitions] @@ -509,16 +515,16 @@ def _make_txn_selector(self): if self._read_timestamp: key = "read_timestamp" - value = _datetime_to_pb_timestamp(self._read_timestamp) + value = self._read_timestamp elif self._min_read_timestamp: key = 
"min_read_timestamp" - value = _datetime_to_pb_timestamp(self._min_read_timestamp) + value = self._min_read_timestamp elif self._max_staleness: key = "max_staleness" - value = _timedelta_to_duration_pb(self._max_staleness) + value = self._max_staleness elif self._exact_staleness: key = "exact_staleness" - value = _timedelta_to_duration_pb(self._exact_staleness) + value = self._exact_staleness else: key = "strong" value = True @@ -556,7 +562,9 @@ def begin(self): txn_selector = self._make_txn_selector() with trace_call("CloudSpanner.BeginTransaction", self._session): response = api.begin_transaction( - self._session.name, txn_selector.begin, metadata=metadata + session=self._session.name, + options=txn_selector.begin, + metadata=metadata, ) self._transaction_id = response.id return self._transaction_id diff --git a/google/cloud/spanner_v1/streamed.py b/google/cloud/spanner_v1/streamed.py index 368d7e6189..a8b15a8f2b 100644 --- a/google/cloud/spanner_v1/streamed.py +++ b/google/cloud/spanner_v1/streamed.py @@ -14,14 +14,12 @@ """Wrapper for streaming results.""" -from google.protobuf.struct_pb2 import ListValue -from google.protobuf.struct_pb2 import Value from google.cloud import exceptions -from google.cloud.spanner_v1.proto import type_pb2 +from google.cloud.spanner_v1 import TypeCode import six # pylint: disable=ungrouped-imports -from google.cloud.spanner_v1._helpers import _parse_value_pb +from google.cloud.spanner_v1._helpers import _parse_value # pylint: enable=ungrouped-imports @@ -32,7 +30,7 @@ class StreamedResultSet(object): :type response_iterator: :param response_iterator: Iterator yielding - :class:`~google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet` + :class:`~google.cloud.spanner_v1.PartialResultSet` instances. :type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot` @@ -52,7 +50,7 @@ def __init__(self, response_iterator, source=None): def fields(self): """Field descriptors for result set columns. - :rtype: list of :class:`~google.cloud.spanner_v1.proto.type_pb2.Field` + :rtype: list of :class:`~google.cloud.spanner_v1.StructType.Field` :returns: list of fields describing column names / types. """ return self._metadata.row_type.fields @@ -61,7 +59,7 @@ def fields(self): def metadata(self): """Result set metadata - :rtype: :class:`~.result_set_pb2.ResultSetMetadata` + :rtype: :class:`~google.cloud.spanner_v1.ResultSetMetadata` :returns: structure describing the results """ return self._metadata @@ -71,7 +69,7 @@ def stats(self): """Result set statistics :rtype: - :class:`~google.cloud.spanner_v1.proto.result_set_pb2.ResultSetStats` + :class:`~google.cloud.spanner_v1.ResultSetStats` :returns: structure describing status about the response """ return self._stats @@ -88,9 +86,9 @@ def _merge_chunk(self, value): """ current_column = len(self._current_row) field = self.fields[current_column] - merged = _merge_by_type(self._pending_chunk, value, field.type) + merged = _merge_by_type(self._pending_chunk, value, field.type_) self._pending_chunk = None - return merged + return _parse_value(merged, field.type_) def _merge_values(self, values): """Merge values into rows. 
@@ -102,7 +100,7 @@ def _merge_values(self, values): for value in values: index = len(self._current_row) field = self.fields[index] - self._current_row.append(_parse_value_pb(value, field.type)) + self._current_row.append(_parse_value(value, field.type_)) if len(self._current_row) == width: self._rows.append(self._current_row) self._current_row = [] @@ -121,7 +119,7 @@ def _consume_next(self): if source is not None and source._transaction_id is None: source._transaction_id = metadata.transaction.id - if response.HasField("stats"): # last response + if "stats" in response: # last response self._stats = response.stats values = list(response.values) @@ -199,16 +197,12 @@ class Unmergeable(ValueError): :type rhs: :class:`~google.protobuf.struct_pb2.Value` :param rhs: remaining value to be merged - :type type_: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type` + :type type_: :class:`~google.cloud.spanner_v1.Type` :param type_: field type of values being merged """ def __init__(self, lhs, rhs, type_): - message = "Cannot merge %s values: %s %s" % ( - type_pb2.TypeCode.Name(type_.code), - lhs, - rhs, - ) + message = "Cannot merge %s values: %s %s" % (TypeCode(type_.code), lhs, rhs,) super(Unmergeable, self).__init__(message) @@ -219,15 +213,9 @@ def _unmergeable(lhs, rhs, type_): def _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument """Helper for '_merge_by_type'.""" - lhs_kind = lhs.WhichOneof("kind") - if lhs_kind == "string_value": - return Value(string_value=lhs.string_value + rhs.string_value) - rhs_kind = rhs.WhichOneof("kind") - array_continuation = ( - lhs_kind == "number_value" - and rhs_kind == "string_value" - and rhs.string_value == "" - ) + if type(lhs) == str: + return float(lhs + rhs) + array_continuation = type(lhs) == float and type(rhs) == str and rhs == "" if array_continuation: return lhs raise Unmergeable(lhs, rhs, type_) @@ -235,10 +223,10 @@ def _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument def _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument """Helper for '_merge_by_type'.""" - return Value(string_value=lhs.string_value + rhs.string_value) + return str(lhs) + str(rhs) -_UNMERGEABLE_TYPES = (type_pb2.BOOL,) +_UNMERGEABLE_TYPES = (TypeCode.BOOL,) def _merge_array(lhs, rhs, type_): @@ -246,17 +234,17 @@ def _merge_array(lhs, rhs, type_): element_type = type_.array_element_type if element_type.code in _UNMERGEABLE_TYPES: # Individual values cannot be merged, just concatenate - lhs.list_value.values.extend(rhs.list_value.values) + lhs.extend(rhs) return lhs - lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) # Sanity check: If either list is empty, short-circuit. # This is effectively a no-op. if not len(lhs) or not len(rhs): - return Value(list_value=ListValue(values=(lhs + rhs))) + lhs.extend(rhs) + return lhs first = rhs.pop(0) - if first.HasField("null_value"): # can't merge + if first is None: # can't merge lhs.append(first) else: last = lhs.pop() @@ -267,22 +255,23 @@ def _merge_array(lhs, rhs, type_): lhs.append(first) else: lhs.append(merged) - return Value(list_value=ListValue(values=(lhs + rhs))) + lhs.extend(rhs) + return lhs def _merge_struct(lhs, rhs, type_): """Helper for '_merge_by_type'.""" fields = type_.struct_type.fields - lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) # Sanity check: If either list is empty, short-circuit. # This is effectively a no-op. 
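+        # Post-migration, lhs and rhs here are plain Python lists of parsed
+        # values (a SQL NULL arrives as None), so merging uses list.extend
+        # rather than rebuilding protobuf ListValue wrappers.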
if not len(lhs) or not len(rhs): - return Value(list_value=ListValue(values=(lhs + rhs))) + lhs.extend(rhs) + return lhs - candidate_type = fields[len(lhs) - 1].type + candidate_type = fields[len(lhs) - 1].type_ first = rhs.pop(0) - if first.HasField("null_value") or candidate_type.code in _UNMERGEABLE_TYPES: + if first is None or candidate_type.code in _UNMERGEABLE_TYPES: lhs.append(first) else: last = lhs.pop() @@ -293,19 +282,20 @@ def _merge_struct(lhs, rhs, type_): lhs.append(first) else: lhs.append(merged) - return Value(list_value=ListValue(values=lhs + rhs)) + lhs.extend(rhs) + return lhs _MERGE_BY_TYPE = { - type_pb2.ARRAY: _merge_array, - type_pb2.BOOL: _unmergeable, - type_pb2.BYTES: _merge_string, - type_pb2.DATE: _merge_string, - type_pb2.FLOAT64: _merge_float64, - type_pb2.INT64: _merge_string, - type_pb2.STRING: _merge_string, - type_pb2.STRUCT: _merge_struct, - type_pb2.TIMESTAMP: _merge_string, + TypeCode.ARRAY: _merge_array, + TypeCode.BOOL: _unmergeable, + TypeCode.BYTES: _merge_string, + TypeCode.DATE: _merge_string, + TypeCode.FLOAT64: _merge_float64, + TypeCode.INT64: _merge_string, + TypeCode.STRING: _merge_string, + TypeCode.STRUCT: _merge_struct, + TypeCode.TIMESTAMP: _merge_string, } diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index 40116a9bbb..51d5826f41 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -16,14 +16,15 @@ from google.protobuf.struct_pb2 import Struct -from google.cloud._helpers import _pb_timestamp_to_datetime from google.cloud.spanner_v1._helpers import ( _make_value_pb, _merge_query_options, _metadata_with_prefix, ) -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions +from google.cloud.spanner_v1 import ExecuteBatchDmlRequest +from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_v1 import TransactionSelector +from google.cloud.spanner_v1 import TransactionOptions from google.cloud.spanner_v1.snapshot import _SnapshotBase from google.cloud.spanner_v1.batch import _BatchBase from google.cloud.spanner_v1._opentelemetry_tracing import trace_call @@ -98,7 +99,7 @@ def begin(self): txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) with trace_call("CloudSpanner.BeginTransaction", self._session): response = api.begin_transaction( - self._session.name, txn_options, metadata=metadata + session=self._session.name, options=txn_options, metadata=metadata ) self._transaction_id = response.id return self._transaction_id @@ -110,7 +111,11 @@ def rollback(self): api = database.spanner_api metadata = _metadata_with_prefix(database.name) with trace_call("CloudSpanner.Rollback", self._session): - api.rollback(self._session.name, self._transaction_id, metadata=metadata) + api.rollback( + session=self._session.name, + transaction_id=self._transaction_id, + metadata=metadata, + ) self.rolled_back = True del self._session._transaction @@ -129,12 +134,12 @@ def commit(self): trace_attributes = {"num_mutations": len(self._mutations)} with trace_call("CloudSpanner.Commit", self._session, trace_attributes): response = api.commit( - self._session.name, + session=self._session.name, mutations=self._mutations, transaction_id=self._transaction_id, metadata=metadata, ) - self.committed = _pb_timestamp_to_datetime(response.commit_timestamp) + self.committed = response.commit_timestamp del self._session._transaction return 
self.committed @@ -168,7 +173,7 @@ def _make_params_pb(params, param_types): if param_types is not None: raise ValueError("Specify 'params' when passing 'param_types'.") - return None + return {} def execute_update( self, dml, params=None, param_types=None, query_mode=None, query_options=None @@ -188,13 +193,13 @@ def execute_update( required if parameters are passed. :type query_mode: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See: `QueryMode `_. :type query_options: - :class:`~google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + :class:`~google.cloud.spanner_v1.ExecuteSqlRequest.QueryOptions` or :class:`dict` :param query_options: (Optional) Options that are provided for query plan stability. @@ -218,20 +223,20 @@ def execute_update( query_options = _merge_query_options(default_query_options, query_options) trace_attributes = {"db.statement": dml} + request = ExecuteSqlRequest( + session=self._session.name, + sql=dml, + transaction=transaction, + params=params_pb, + param_types=param_types, + query_mode=query_mode, + query_options=query_options, + seqno=seqno, + ) with trace_call( "CloudSpanner.ReadWriteTransaction", self._session, trace_attributes ): - response = api.execute_sql( - self._session.name, - dml, - transaction=transaction, - params=params_pb, - param_types=param_types, - query_mode=query_mode, - query_options=query_options, - seqno=seqno, - metadata=metadata, - ) + response = api.execute_sql(request=request, metadata=metadata) return response.stats.row_count_exact def batch_update(self, statements): @@ -259,12 +264,14 @@ def batch_update(self, statements): parsed = [] for statement in statements: if isinstance(statement, str): - parsed.append({"sql": statement}) + parsed.append(ExecuteBatchDmlRequest.Statement(sql=statement)) else: dml, params, param_types = statement params_pb = self._make_params_pb(params, param_types) parsed.append( - {"sql": dml, "params": params_pb, "param_types": param_types} + ExecuteBatchDmlRequest.Statement( + sql=dml, params=params_pb, param_types=param_types + ) ) database = self._session._database @@ -279,16 +286,16 @@ def batch_update(self, statements): trace_attributes = { # Get just the queries from the DML statement batch - "db.statement": ";".join([statement["sql"] for statement in parsed]) + "db.statement": ";".join([statement.sql for statement in parsed]) } + request = ExecuteBatchDmlRequest( + session=self._session.name, + transaction=transaction, + statements=parsed, + seqno=seqno, + ) with trace_call("CloudSpanner.DMLTransaction", self._session, trace_attributes): - response = api.execute_batch_dml( - session=self._session.name, - transaction=transaction, - statements=parsed, - seqno=seqno, - metadata=metadata, - ) + response = api.execute_batch_dml(request=request, metadata=metadata) row_counts = [ result_set.stats.row_count_exact for result_set in response.result_sets ] diff --git a/google/cloud/spanner_v1/types.py b/google/cloud/spanner_v1/types.py deleted file mode 100644 index 07c94ba871..0000000000 --- a/google/cloud/spanner_v1/types.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import mutation_pb2 -from google.cloud.spanner_v1.proto import query_plan_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.cloud.spanner_v1.proto import type_pb2 - - -_shared_modules = [ - http_pb2, - descriptor_pb2, - duration_pb2, - empty_pb2, - struct_pb2, - timestamp_pb2, -] - -_local_modules = [ - keys_pb2, - mutation_pb2, - query_plan_pb2, - result_set_pb2, - spanner_pb2, - transaction_pb2, - type_pb2, -] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py new file mode 100644 index 0000000000..890a024f01 --- /dev/null +++ b/google/cloud/spanner_v1/types/__init__.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
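+#
+# Illustrative (hedged) usage of the proto-plus types re-exported below;
+# the session path is a placeholder:
+#
+#     from google.cloud.spanner_v1.types import KeySet, ReadRequest
+#
+#     request = ReadRequest(
+#         session="projects/p/instances/i/databases/d/sessions/s",
+#         table="UserEvents",
+#         columns=["UserName", "EventDate"],
+#         key_set=KeySet(all_=True),
+#     )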
+# + +from .keys import ( + KeyRange, + KeySet, +) +from .mutation import Mutation +from .query_plan import ( + PlanNode, + QueryPlan, +) +from .transaction import ( + TransactionOptions, + Transaction, + TransactionSelector, +) +from .type import ( + Type, + StructType, +) +from .result_set import ( + ResultSet, + PartialResultSet, + ResultSetMetadata, + ResultSetStats, +) +from .spanner import ( + CreateSessionRequest, + BatchCreateSessionsRequest, + BatchCreateSessionsResponse, + Session, + GetSessionRequest, + ListSessionsRequest, + ListSessionsResponse, + DeleteSessionRequest, + ExecuteSqlRequest, + ExecuteBatchDmlRequest, + ExecuteBatchDmlResponse, + PartitionOptions, + PartitionQueryRequest, + PartitionReadRequest, + Partition, + PartitionResponse, + ReadRequest, + BeginTransactionRequest, + CommitRequest, + CommitResponse, + RollbackRequest, +) + + +__all__ = ( + "KeyRange", + "KeySet", + "Mutation", + "PlanNode", + "QueryPlan", + "TransactionOptions", + "Transaction", + "TransactionSelector", + "Type", + "StructType", + "ResultSet", + "PartialResultSet", + "ResultSetMetadata", + "ResultSetStats", + "CreateSessionRequest", + "BatchCreateSessionsRequest", + "BatchCreateSessionsResponse", + "Session", + "GetSessionRequest", + "ListSessionsRequest", + "ListSessionsResponse", + "DeleteSessionRequest", + "ExecuteSqlRequest", + "ExecuteBatchDmlRequest", + "ExecuteBatchDmlResponse", + "PartitionOptions", + "PartitionQueryRequest", + "PartitionReadRequest", + "Partition", + "PartitionResponse", + "ReadRequest", + "BeginTransactionRequest", + "CommitRequest", + "CommitResponse", + "RollbackRequest", +) diff --git a/google/cloud/spanner_v1/types/keys.py b/google/cloud/spanner_v1/types/keys.py new file mode 100644 index 0000000000..342d14829c --- /dev/null +++ b/google/cloud/spanner_v1/types/keys.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", manifest={"KeyRange", "KeySet",}, +) + + +class KeyRange(proto.Message): + r"""KeyRange represents a range of rows in a table or index. + + A range has a start key and an end key. These keys can be open or + closed, indicating if the range includes rows with that key. + + Keys are represented by lists, where the ith value in the list + corresponds to the ith component of the table or index primary key. + Individual values are encoded as described + [here][google.spanner.v1.TypeCode]. 
+
+    For example, consider the following table definition:
+
+    ::
+
+        CREATE TABLE UserEvents (
+          UserName STRING(MAX),
+          EventDate STRING(10)
+        ) PRIMARY KEY(UserName, EventDate);
+
+    The following keys name rows in this table:
+
+    ::
+
+        ["Bob", "2014-09-23"]
+        ["Alfred", "2015-06-12"]
+
+    Since the ``UserEvents`` table's ``PRIMARY KEY`` clause names two
+    columns, each ``UserEvents`` key has two elements; the first is the
+    ``UserName``, and the second is the ``EventDate``.
+
+    Key ranges with multiple components are interpreted
+    lexicographically by component using the table or index key's
+    declared sort order. For example, the following range returns all
+    events for user ``"Bob"`` that occurred in the year 2015:
+
+    ::
+
+        "start_closed": ["Bob", "2015-01-01"]
+        "end_closed": ["Bob", "2015-12-31"]
+
+    Start and end keys can omit trailing key components. This affects
+    the inclusion and exclusion of rows that exactly match the provided
+    key components: if the key is closed, then rows that exactly match
+    the provided components are included; if the key is open, then rows
+    that exactly match are not included.
+
+    For example, the following range includes all events for ``"Bob"``
+    that occurred during and after the year 2000:
+
+    ::
+
+        "start_closed": ["Bob", "2000-01-01"]
+        "end_closed": ["Bob"]
+
+    The next example retrieves all events for ``"Bob"``:
+
+    ::
+
+        "start_closed": ["Bob"]
+        "end_closed": ["Bob"]
+
+    To retrieve events before the year 2000:
+
+    ::
+
+        "start_closed": ["Bob"]
+        "end_open": ["Bob", "2000-01-01"]
+
+    The following range includes all rows in the table:
+
+    ::
+
+        "start_closed": []
+        "end_closed": []
+
+    This range returns all users whose ``UserName`` begins with any
+    character from A to C:
+
+    ::
+
+        "start_closed": ["A"]
+        "end_open": ["D"]
+
+    This range returns all users whose ``UserName`` begins with B:
+
+    ::
+
+        "start_closed": ["B"]
+        "end_open": ["C"]
+
+    Key ranges honor column sort order. For example, suppose a table is
+    defined as follows:
+
+    ::
+
+        CREATE TABLE DescendingSortedTable (
+          Key INT64,
+          ...
+        ) PRIMARY KEY(Key DESC);
+
+    The following range retrieves all rows with key values between 1 and
+    100 inclusive:
+
+    ::
+
+        "start_closed": ["100"]
+        "end_closed": ["1"]
+
+    Note that 100 is passed as the start, and 1 is passed as the end,
+    because ``Key`` is a descending column in the schema.
+
+    Attributes:
+        start_closed (~.struct.ListValue):
+            If the start is closed, then the range includes all rows
+            whose first ``len(start_closed)`` key columns exactly match
+            ``start_closed``.
+        start_open (~.struct.ListValue):
+            If the start is open, then the range excludes rows whose
+            first ``len(start_open)`` key columns exactly match
+            ``start_open``.
+        end_closed (~.struct.ListValue):
+            If the end is closed, then the range includes all rows whose
+            first ``len(end_closed)`` key columns exactly match
+            ``end_closed``.
+        end_open (~.struct.ListValue):
+            If the end is open, then the range excludes rows whose first
+            ``len(end_open)`` key columns exactly match ``end_open``.
+ """ + + start_closed = proto.Field( + proto.MESSAGE, number=1, oneof="start_key_type", message=struct.ListValue, + ) + + start_open = proto.Field( + proto.MESSAGE, number=2, oneof="start_key_type", message=struct.ListValue, + ) + + end_closed = proto.Field( + proto.MESSAGE, number=3, oneof="end_key_type", message=struct.ListValue, + ) + + end_open = proto.Field( + proto.MESSAGE, number=4, oneof="end_key_type", message=struct.ListValue, + ) + + +class KeySet(proto.Message): + r"""``KeySet`` defines a collection of Cloud Spanner keys and/or key + ranges. All the keys are expected to be in the same table or index. + The keys need not be sorted in any particular way. + + If the same key is specified multiple times in the set (for example + if two ranges, two keys, or a key and a range overlap), Cloud + Spanner behaves as if the key were only specified once. + + Attributes: + keys (Sequence[~.struct.ListValue]): + A list of specific keys. Entries in ``keys`` should have + exactly as many elements as there are columns in the primary + or index key with which this ``KeySet`` is used. Individual + key values are encoded as described + [here][google.spanner.v1.TypeCode]. + ranges (Sequence[~.gs_keys.KeyRange]): + A list of key ranges. See + [KeyRange][google.spanner.v1.KeyRange] for more information + about key range specifications. + all_ (bool): + For convenience ``all`` can be set to ``true`` to indicate + that this ``KeySet`` matches all keys in the table or index. + Note that any keys specified in ``keys`` or ``ranges`` are + only yielded once. + """ + + keys = proto.RepeatedField(proto.MESSAGE, number=1, message=struct.ListValue,) + + ranges = proto.RepeatedField(proto.MESSAGE, number=2, message="KeyRange",) + + all_ = proto.Field(proto.BOOL, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/mutation.py b/google/cloud/spanner_v1/types/mutation.py new file mode 100644 index 0000000000..5c22aae7ee --- /dev/null +++ b/google/cloud/spanner_v1/types/mutation.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.spanner_v1.types import keys +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module(package="google.spanner.v1", manifest={"Mutation",},) + + +class Mutation(proto.Message): + r"""A modification to one or more Cloud Spanner rows. Mutations can be + applied to a Cloud Spanner database by sending them in a + [Commit][google.spanner.v1.Spanner.Commit] call. + + Attributes: + insert (~.mutation.Mutation.Write): + Insert new rows in a table. If any of the rows already + exist, the write or transaction fails with error + ``ALREADY_EXISTS``. + update (~.mutation.Mutation.Write): + Update existing rows in a table. If any of the rows does not + already exist, the transaction fails with error + ``NOT_FOUND``. 
+ insert_or_update (~.mutation.Mutation.Write): + Like [insert][google.spanner.v1.Mutation.insert], except + that if the row already exists, then its column values are + overwritten with the ones provided. Any column values not + explicitly written are preserved. + + When using + [insert_or_update][google.spanner.v1.Mutation.insert_or_update], + just as when using + [insert][google.spanner.v1.Mutation.insert], all + ``NOT NULL`` columns in the table must be given a value. + This holds true even when the row already exists and will + therefore actually be updated. + replace (~.mutation.Mutation.Write): + Like [insert][google.spanner.v1.Mutation.insert], except + that if the row already exists, it is deleted, and the + column values provided are inserted instead. Unlike + [insert_or_update][google.spanner.v1.Mutation.insert_or_update], + this means any values not explicitly written become + ``NULL``. + + In an interleaved table, if you create the child table with + the ``ON DELETE CASCADE`` annotation, then replacing a + parent row also deletes the child rows. Otherwise, you must + delete the child rows before you replace the parent row. + delete (~.mutation.Mutation.Delete): + Delete rows from a table. Succeeds whether or + not the named rows were present. + """ + + class Write(proto.Message): + r"""Arguments to [insert][google.spanner.v1.Mutation.insert], + [update][google.spanner.v1.Mutation.update], + [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and + [replace][google.spanner.v1.Mutation.replace] operations. + + Attributes: + table (str): + Required. The table whose rows will be + written. + columns (Sequence[str]): + The names of the columns in + [table][google.spanner.v1.Mutation.Write.table] to be + written. + + The list of columns must contain enough columns to allow + Cloud Spanner to derive values for all primary key columns + in the row(s) to be modified. + values (Sequence[~.struct.ListValue]): + The values to be written. ``values`` can contain more than + one list of values. If it does, then multiple rows are + written, one for each entry in ``values``. Each list in + ``values`` must have exactly as many entries as there are + entries in + [columns][google.spanner.v1.Mutation.Write.columns] above. + Sending multiple lists is equivalent to sending multiple + ``Mutation``\ s, each containing one ``values`` entry and + repeating [table][google.spanner.v1.Mutation.Write.table] + and [columns][google.spanner.v1.Mutation.Write.columns]. + Individual values in each list are encoded as described + [here][google.spanner.v1.TypeCode]. + """ + + table = proto.Field(proto.STRING, number=1) + + columns = proto.RepeatedField(proto.STRING, number=2) + + values = proto.RepeatedField(proto.MESSAGE, number=3, message=struct.ListValue,) + + class Delete(proto.Message): + r"""Arguments to [delete][google.spanner.v1.Mutation.delete] operations. + + Attributes: + table (str): + Required. The table whose rows will be + deleted. + key_set (~.keys.KeySet): + Required. The primary keys of the rows within + [table][google.spanner.v1.Mutation.Delete.table] to delete. + The primary keys must be specified in the order in which + they appear in the ``PRIMARY KEY()`` clause of the table's + equivalent DDL statement (the DDL statement used to create + the table). Delete is idempotent. The transaction will + succeed even if some or all rows do not exist. 
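For orientation, a minimal sketch of the two mutation shapes described above, an insert and a delete, built from the proto-plus messages in this file. The ``_row`` helper is hypothetical, and ``UserEvents`` is the table from the ``KeyRange`` example:

::

    from google.protobuf import struct_pb2

    from google.cloud.spanner_v1.types import keys, mutation


    def _row(*cells):
        # Hypothetical helper: encode a row of string cells as a ListValue.
        return struct_pb2.ListValue(
            values=[struct_pb2.Value(string_value=c) for c in cells]
        )


    # Insert one row; fails with ALREADY_EXISTS if the key is taken.
    insert = mutation.Mutation(
        insert=mutation.Mutation.Write(
            table="UserEvents",
            columns=["UserName", "EventDate"],
            values=[_row("Bob", "2014-09-23")],
        )
    )

    # Delete is idempotent: missing rows do not fail the transaction.
    delete = mutation.Mutation(
        delete=mutation.Mutation.Delete(
            table="UserEvents",
            key_set=keys.KeySet(keys=[_row("Alfred", "2015-06-12")]),
        )
    )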
+ """ + + table = proto.Field(proto.STRING, number=1) + + key_set = proto.Field(proto.MESSAGE, number=2, message=keys.KeySet,) + + insert = proto.Field(proto.MESSAGE, number=1, oneof="operation", message=Write,) + + update = proto.Field(proto.MESSAGE, number=2, oneof="operation", message=Write,) + + insert_or_update = proto.Field( + proto.MESSAGE, number=3, oneof="operation", message=Write, + ) + + replace = proto.Field(proto.MESSAGE, number=4, oneof="operation", message=Write,) + + delete = proto.Field(proto.MESSAGE, number=5, oneof="operation", message=Delete,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/query_plan.py b/google/cloud/spanner_v1/types/query_plan.py new file mode 100644 index 0000000000..5a0f8b5fbb --- /dev/null +++ b/google/cloud/spanner_v1/types/query_plan.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", manifest={"PlanNode", "QueryPlan",}, +) + + +class PlanNode(proto.Message): + r"""Node information for nodes appearing in a + [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. + + Attributes: + index (int): + The ``PlanNode``'s index in [node + list][google.spanner.v1.QueryPlan.plan_nodes]. + kind (~.query_plan.PlanNode.Kind): + Used to determine the type of node. May be needed for + visualizing different kinds of nodes differently. For + example, If the node is a + [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it + will have a condensed representation which can be used to + directly embed a description of the node in its parent. + display_name (str): + The display name for the node. + child_links (Sequence[~.query_plan.PlanNode.ChildLink]): + List of child node ``index``\ es and their relationship to + this parent. + short_representation (~.query_plan.PlanNode.ShortRepresentation): + Condensed representation for + [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. + metadata (~.struct.Struct): + Attributes relevant to the node contained in a group of + key-value pairs. For example, a Parameter Reference node + could have the following information in its metadata: + + :: + + { + "parameter_reference": "param1", + "parameter_type": "array" + } + execution_stats (~.struct.Struct): + The execution statistics associated with the + node, contained in a group of key-value pairs. + Only present if the plan was returned as a + result of a profile query. For example, number + of executions, number of rows/time per execution + etc. + """ + + class Kind(proto.Enum): + r"""The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes + between the two different kinds of nodes that can appear in a query + plan. 
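Because plan nodes refer to their children by index into ``plan_nodes``, a pre-order traversal is plain index chasing. A minimal sketch, assuming proto-plus surfaces ``kind`` as an enum member (so ``.name`` is available):

::

    from google.cloud.spanner_v1.types import query_plan


    def print_plan(plan: query_plan.QueryPlan, index: int = 0, depth: int = 0):
        # plan_nodes is a flat, pre-ordered list; node 0 is the plan root.
        node = plan.plan_nodes[index]
        print("  " * depth + f"{node.display_name} [{node.kind.name}]")
        for link in node.child_links:
            print_plan(plan, link.child_index, depth + 1)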
+ """ + KIND_UNSPECIFIED = 0 + RELATIONAL = 1 + SCALAR = 2 + + class ChildLink(proto.Message): + r"""Metadata associated with a parent-child relationship appearing in a + [PlanNode][google.spanner.v1.PlanNode]. + + Attributes: + child_index (int): + The node to which the link points. + type_ (str): + The type of the link. For example, in Hash + Joins this could be used to distinguish between + the build child and the probe child, or in the + case of the child being an output variable, to + represent the tag associated with the output + variable. + variable (str): + Only present if the child node is + [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and + corresponds to an output variable of the parent node. The + field carries the name of the output variable. For example, + a ``TableScan`` operator that reads rows from a table will + have child links to the ``SCALAR`` nodes representing the + output variables created for each column that is read by the + operator. The corresponding ``variable`` fields will be set + to the variable names assigned to the columns. + """ + + child_index = proto.Field(proto.INT32, number=1) + + type_ = proto.Field(proto.STRING, number=2) + + variable = proto.Field(proto.STRING, number=3) + + class ShortRepresentation(proto.Message): + r"""Condensed representation of a node and its subtree. Only present for + ``SCALAR`` [PlanNode(s)][google.spanner.v1.PlanNode]. + + Attributes: + description (str): + A string representation of the expression + subtree rooted at this node. + subqueries (Sequence[~.query_plan.PlanNode.ShortRepresentation.SubqueriesEntry]): + A mapping of (subquery variable name) -> (subquery node id) + for cases where the ``description`` string of this node + references a ``SCALAR`` subquery contained in the expression + subtree rooted at this node. The referenced ``SCALAR`` + subquery may not necessarily be a direct child of this node. + """ + + description = proto.Field(proto.STRING, number=1) + + subqueries = proto.MapField(proto.STRING, proto.INT32, number=2) + + index = proto.Field(proto.INT32, number=1) + + kind = proto.Field(proto.ENUM, number=2, enum=Kind,) + + display_name = proto.Field(proto.STRING, number=3) + + child_links = proto.RepeatedField(proto.MESSAGE, number=4, message=ChildLink,) + + short_representation = proto.Field( + proto.MESSAGE, number=5, message=ShortRepresentation, + ) + + metadata = proto.Field(proto.MESSAGE, number=6, message=struct.Struct,) + + execution_stats = proto.Field(proto.MESSAGE, number=7, message=struct.Struct,) + + +class QueryPlan(proto.Message): + r"""Contains an ordered list of nodes appearing in the query + plan. + + Attributes: + plan_nodes (Sequence[~.query_plan.PlanNode]): + The nodes in the query plan. Plan nodes are returned in + pre-order starting with the plan root. Each + [PlanNode][google.spanner.v1.PlanNode]'s ``id`` corresponds + to its index in ``plan_nodes``. + """ + + plan_nodes = proto.RepeatedField(proto.MESSAGE, number=1, message="PlanNode",) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/result_set.py b/google/cloud/spanner_v1/types/result_set.py new file mode 100644 index 0000000000..71b4dceac2 --- /dev/null +++ b/google/cloud/spanner_v1/types/result_set.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.spanner_v1.types import query_plan as gs_query_plan +from google.cloud.spanner_v1.types import transaction as gs_transaction +from google.cloud.spanner_v1.types import type as gs_type +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", + manifest={"ResultSet", "PartialResultSet", "ResultSetMetadata", "ResultSetStats",}, +) + + +class ResultSet(proto.Message): + r"""Results from [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Attributes: + metadata (~.result_set.ResultSetMetadata): + Metadata about the result set, such as row + type information. + rows (Sequence[~.struct.ListValue]): + Each element in ``rows`` is a row whose format is defined by + [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. + The ith element in each row matches the ith field in + [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. + Elements are encoded based on type as described + [here][google.spanner.v1.TypeCode]. + stats (~.result_set.ResultSetStats): + Query plan and execution statistics for the SQL statement + that produced this result set. These can be requested by + setting + [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + DML statements always produce stats containing the number of + rows modified, unless executed using the + [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] + [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + Other fields may or may not be populated, based on the + [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + """ + + metadata = proto.Field(proto.MESSAGE, number=1, message="ResultSetMetadata",) + + rows = proto.RepeatedField(proto.MESSAGE, number=2, message=struct.ListValue,) + + stats = proto.Field(proto.MESSAGE, number=3, message="ResultSetStats",) + + +class PartialResultSet(proto.Message): + r"""Partial results from a streaming read or SQL query. Streaming + reads and SQL queries better tolerate large result sets, large + rows, and large values, but are a little trickier to consume. + + Attributes: + metadata (~.result_set.ResultSetMetadata): + Metadata about the result set, such as row + type information. Only present in the first + response. + values (Sequence[~.struct.Value]): + A streamed result set consists of a stream of values, which + might be split into many ``PartialResultSet`` messages to + accommodate large rows and/or large values. Every N complete + values defines a row, where N is equal to the number of + entries in + [metadata.row_type.fields][google.spanner.v1.StructType.fields]. + + Most values are encoded based on type as described + [here][google.spanner.v1.TypeCode]. + + It is possible that the last value in values is "chunked", + meaning that the rest of the value is sent in subsequent + ``PartialResultSet``\ (s). 
This is denoted by the
+            [chunked_value][google.spanner.v1.PartialResultSet.chunked_value]
+            field. Two or more chunked values can be merged to form a
+            complete value as follows:
+
+            -  ``bool/number/null``: cannot be chunked
+            -  ``string``: concatenate the strings
+            -  ``list``: concatenate the lists. If the last element in a
+               list is a ``string``, ``list``, or ``object``, merge it
+               with the first element in the next list by applying these
+               rules recursively.
+            -  ``object``: concatenate the (field name, field value)
+               pairs. If a field name is duplicated, then apply these
+               rules recursively to merge the field values.
+
+            Some examples of merging:
+
+            ::
+
+                # Strings are concatenated.
+                "foo", "bar" => "foobar"
+
+                # Lists of non-strings are concatenated.
+                [2, 3], [4] => [2, 3, 4]
+
+                # Lists are concatenated, but the last and first elements are merged
+                # because they are strings.
+                ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
+
+                # Lists are concatenated, but the last and first elements are merged
+                # because they are lists. Recursively, the last and first elements
+                # of the inner lists are merged because they are strings.
+                ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
+
+                # Non-overlapping object fields are combined.
+                {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
+
+                # Overlapping object fields are merged.
+                {"a": "1"}, {"a": "2"} => {"a": "12"}
+
+                # Examples of merging objects containing lists of strings.
+                {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
+
+            For a more complete example, suppose a streaming SQL query
+            is yielding a result set whose rows contain a single string
+            field. The following ``PartialResultSet``\ s might be
+            yielded:
+
+            ::
+
+                {
+                  "metadata": { ... }
+                  "values": ["Hello", "W"]
+                  "chunked_value": true
+                  "resume_token": "Af65..."
+                }
+                {
+                  "values": ["orl"]
+                  "chunked_value": true
+                  "resume_token": "Bqp2..."
+                }
+                {
+                  "values": ["d"]
+                  "resume_token": "Zx1B..."
+                }
+
+            This sequence of ``PartialResultSet``\ s encodes two rows,
+            one containing the field value ``"Hello"``, and a second
+            containing the field value ``"World" = "W" + "orl" + "d"``.
+        chunked_value (bool):
+            If true, then the final value in
+            [values][google.spanner.v1.PartialResultSet.values] is
+            chunked, and must be combined with more values from
+            subsequent ``PartialResultSet``\ s to obtain a complete
+            field value.
+        resume_token (bytes):
+            Streaming calls might be interrupted for a variety of
+            reasons, such as TCP connection loss. If this occurs, the
+            stream of results can be resumed by re-sending the original
+            request and including ``resume_token``. Note that executing
+            any other transaction in the same session invalidates the
+            token.
+        stats (~.result_set.ResultSetStats):
+            Query plan and execution statistics for the statement that
+            produced this streaming result set. These can be requested
+            by setting
+            [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
+            and are sent only once with the last response in the stream.
+            This field will also be present in the last response for DML
+            statements.
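The merge rules above reduce to concatenation plus a recursive fix-up at the seam. A minimal sketch over already-decoded Python values (a hypothetical helper; the handwritten streamed-results code performs the equivalent merge on protobuf ``Value``\ s):

::

    def merge_chunk(last, chunk):
        # bool/number/null cannot be chunked; strings simply concatenate.
        if isinstance(last, str) and isinstance(chunk, str):
            return last + chunk
        if isinstance(last, list) and isinstance(chunk, list):
            # Concatenate, merging the seam elements recursively when the
            # trailing element is a string, list, or object.
            if last and chunk and isinstance(last[-1], (str, list, dict)):
                return last[:-1] + [merge_chunk(last[-1], chunk[0])] + chunk[1:]
            return last + chunk
        if isinstance(last, dict) and isinstance(chunk, dict):
            merged = dict(last)
            for key, value in chunk.items():
                merged[key] = (
                    merge_chunk(merged[key], value) if key in merged else value
                )
            return merged
        raise ValueError("unmergeable chunk types")


    assert merge_chunk(["a", "b"], ["c", "d"]) == ["a", "bc", "d"]
    assert merge_chunk({"a": ["1"]}, {"a": ["2"]}) == {"a": ["12"]}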
+ """ + + metadata = proto.Field(proto.MESSAGE, number=1, message="ResultSetMetadata",) + + values = proto.RepeatedField(proto.MESSAGE, number=2, message=struct.Value,) + + chunked_value = proto.Field(proto.BOOL, number=3) + + resume_token = proto.Field(proto.BYTES, number=4) + + stats = proto.Field(proto.MESSAGE, number=5, message="ResultSetStats",) + + +class ResultSetMetadata(proto.Message): + r"""Metadata about a [ResultSet][google.spanner.v1.ResultSet] or + [PartialResultSet][google.spanner.v1.PartialResultSet]. + + Attributes: + row_type (~.gs_type.StructType): + Indicates the field names and types for the rows in the + result set. For example, a SQL query like + ``"SELECT UserId, UserName FROM Users"`` could return a + ``row_type`` value like: + + :: + + "fields": [ + { "name": "UserId", "type": { "code": "INT64" } }, + { "name": "UserName", "type": { "code": "STRING" } }, + ] + transaction (~.gs_transaction.Transaction): + If the read or SQL query began a transaction + as a side-effect, the information about the new + transaction is yielded here. + """ + + row_type = proto.Field(proto.MESSAGE, number=1, message=gs_type.StructType,) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.Transaction, + ) + + +class ResultSetStats(proto.Message): + r"""Additional statistics about a + [ResultSet][google.spanner.v1.ResultSet] or + [PartialResultSet][google.spanner.v1.PartialResultSet]. + + Attributes: + query_plan (~.gs_query_plan.QueryPlan): + [QueryPlan][google.spanner.v1.QueryPlan] for the query + associated with this result. + query_stats (~.struct.Struct): + Aggregated statistics from the execution of the query. Only + present when the query is profiled. For example, a query + could return the statistics as follows: + + :: + + { + "rows_returned": "3", + "elapsed_time": "1.22 secs", + "cpu_time": "1.19 secs" + } + row_count_exact (int): + Standard DML returns an exact count of rows + that were modified. + row_count_lower_bound (int): + Partitioned DML does not offer exactly-once + semantics, so it returns a lower bound of the + rows modified. + """ + + query_plan = proto.Field(proto.MESSAGE, number=1, message=gs_query_plan.QueryPlan,) + + query_stats = proto.Field(proto.MESSAGE, number=2, message=struct.Struct,) + + row_count_exact = proto.Field(proto.INT64, number=3, oneof="row_count") + + row_count_lower_bound = proto.Field(proto.INT64, number=4, oneof="row_count") + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py new file mode 100644 index 0000000000..eeffd2bde5 --- /dev/null +++ b/google/cloud/spanner_v1/types/spanner.py @@ -0,0 +1,948 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.cloud.spanner_v1.types import keys +from google.cloud.spanner_v1.types import mutation +from google.cloud.spanner_v1.types import result_set +from google.cloud.spanner_v1.types import transaction as gs_transaction +from google.cloud.spanner_v1.types import type as gs_type +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as gr_status # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", + manifest={ + "CreateSessionRequest", + "BatchCreateSessionsRequest", + "BatchCreateSessionsResponse", + "Session", + "GetSessionRequest", + "ListSessionsRequest", + "ListSessionsResponse", + "DeleteSessionRequest", + "ExecuteSqlRequest", + "ExecuteBatchDmlRequest", + "ExecuteBatchDmlResponse", + "PartitionOptions", + "PartitionQueryRequest", + "PartitionReadRequest", + "Partition", + "PartitionResponse", + "ReadRequest", + "BeginTransactionRequest", + "CommitRequest", + "CommitResponse", + "RollbackRequest", + }, +) + + +class CreateSessionRequest(proto.Message): + r"""The request for + [CreateSession][google.spanner.v1.Spanner.CreateSession]. + + Attributes: + database (str): + Required. The database in which the new + session is created. + session (~.spanner.Session): + The session to create. + """ + + database = proto.Field(proto.STRING, number=1) + + session = proto.Field(proto.MESSAGE, number=2, message="Session",) + + +class BatchCreateSessionsRequest(proto.Message): + r"""The request for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + + Attributes: + database (str): + Required. The database in which the new + sessions are created. + session_template (~.spanner.Session): + Parameters to be applied to each created + session. + session_count (int): + Required. The number of sessions to be created in this batch + call. The API may return fewer than the requested number of + sessions. If a specific number of sessions are desired, the + client can make additional calls to BatchCreateSessions + (adjusting + [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + as necessary). + """ + + database = proto.Field(proto.STRING, number=1) + + session_template = proto.Field(proto.MESSAGE, number=2, message="Session",) + + session_count = proto.Field(proto.INT32, number=3) + + +class BatchCreateSessionsResponse(proto.Message): + r"""The response for + [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + + Attributes: + session (Sequence[~.spanner.Session]): + The freshly created sessions. + """ + + session = proto.RepeatedField(proto.MESSAGE, number=1, message="Session",) + + +class Session(proto.Message): + r"""A session in the Cloud Spanner API. + + Attributes: + name (str): + The name of the session. This is always + system-assigned; values provided when creating a + session are ignored. + labels (Sequence[~.spanner.Session.LabelsEntry]): + The labels for the session. + + - Label keys must be between 1 and 63 characters long and + must conform to the following regular expression: + ``[a-z]([-a-z0-9]*[a-z0-9])?``. + - Label values must be between 0 and 63 characters long and + must conform to the regular expression + ``([a-z]([-a-z0-9]*[a-z0-9])?)?``. + - No more than 64 labels can be associated with a given + session. + + See https://goo.gl/xmQnxf for more information on and + examples of labels. + create_time (~.timestamp.Timestamp): + Output only. 
The timestamp when the session + is created. + approximate_last_use_time (~.timestamp.Timestamp): + Output only. The approximate timestamp when + the session is last used. It is typically + earlier than the actual last use time. + """ + + name = proto.Field(proto.STRING, number=1) + + labels = proto.MapField(proto.STRING, proto.STRING, number=2) + + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + approximate_last_use_time = proto.Field( + proto.MESSAGE, number=4, message=timestamp.Timestamp, + ) + + +class GetSessionRequest(proto.Message): + r"""The request for [GetSession][google.spanner.v1.Spanner.GetSession]. + + Attributes: + name (str): + Required. The name of the session to + retrieve. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListSessionsRequest(proto.Message): + r"""The request for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + + Attributes: + database (str): + Required. The database in which to list + sessions. + page_size (int): + Number of sessions to be returned in the + response. If 0 or less, defaults to the server's + maximum allowed page size. + page_token (str): + If non-empty, ``page_token`` should contain a + [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] + from a previous + [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. + filter (str): + An expression for filtering the results of the request. + Filter rules are case insensitive. The fields eligible for + filtering are: + + - ``labels.key`` where key is the name of a label + + Some examples of using filters are: + + - ``labels.env:*`` --> The session has the label "env". + - ``labels.env:dev`` --> The session has the label "env" + and the value of the label contains the string "dev". + """ + + database = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=4) + + +class ListSessionsResponse(proto.Message): + r"""The response for + [ListSessions][google.spanner.v1.Spanner.ListSessions]. + + Attributes: + sessions (Sequence[~.spanner.Session]): + The list of requested sessions. + next_page_token (str): + ``next_page_token`` can be sent in a subsequent + [ListSessions][google.spanner.v1.Spanner.ListSessions] call + to fetch more of the matching sessions. + """ + + @property + def raw_page(self): + return self + + sessions = proto.RepeatedField(proto.MESSAGE, number=1, message="Session",) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteSessionRequest(proto.Message): + r"""The request for + [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. + + Attributes: + name (str): + Required. The name of the session to delete. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ExecuteSqlRequest(proto.Message): + r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] + and + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. + + Attributes: + session (str): + Required. The session in which the SQL query + should be performed. + transaction (~.gs_transaction.TransactionSelector): + The transaction to use. + For queries, if none is provided, the default is + a temporary read-only transaction with strong + concurrency. + + Standard DML statements require a read-write + transaction. To protect against replays, single- + use transactions are not supported. 
The caller + must either supply an existing transaction ID or + begin a new transaction. + Partitioned DML requires an existing Partitioned + DML transaction ID. + sql (str): + Required. The SQL string. + params (~.struct.Struct): + Parameter names and values that bind to placeholders in the + SQL string. + + A parameter placeholder consists of the ``@`` character + followed by the parameter name (for example, + ``@firstName``). Parameter names can contain letters, + numbers, and underscores. + + Parameters can appear anywhere that a literal value is + expected. The same parameter name can be used more than + once, for example: + + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` + + It is an error to execute a SQL statement with unbound + parameters. + param_types (Sequence[~.spanner.ExecuteSqlRequest.ParamTypesEntry]): + It is not always possible for Cloud Spanner to infer the + right SQL type from a JSON value. For example, values of + type ``BYTES`` and values of type ``STRING`` both appear in + [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON + strings. + + In these cases, ``param_types`` can be used to specify the + exact SQL type for some or all of the SQL statement + parameters. See the definition of + [Type][google.spanner.v1.Type] for more information about + SQL types. + resume_token (bytes): + If this request is resuming a previously interrupted SQL + statement execution, ``resume_token`` should be copied from + the last + [PartialResultSet][google.spanner.v1.PartialResultSet] + yielded before the interruption. Doing this enables the new + SQL statement execution to resume where the last one left + off. The rest of the request parameters must exactly match + the request that yielded this token. + query_mode (~.spanner.ExecuteSqlRequest.QueryMode): + Used to control the amount of debugging information returned + in [ResultSetStats][google.spanner.v1.ResultSetStats]. If + [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] + is set, + [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] + can only be set to + [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. + partition_token (bytes): + If present, results will be restricted to the specified + partition previously created using PartitionQuery(). There + must be an exact match for the values of fields common to + this message and the PartitionQueryRequest message used to + create this partition_token. + seqno (int): + A per-transaction sequence number used to + identify this request. This field makes each + request idempotent such that if the request is + received multiple times, at most one will + succeed. + + The sequence number must be monotonically + increasing within the transaction. If a request + arrives for the first time with an out-of-order + sequence number, the transaction may be aborted. + Replays of previously handled requests will + yield the same response as the first execution. + Required for DML statements. Ignored for + queries. + query_options (~.spanner.ExecuteSqlRequest.QueryOptions): + Query optimizer configuration to use for the + given query. + """ + + class QueryMode(proto.Enum): + r"""Mode in which the statement must be processed.""" + NORMAL = 0 + PLAN = 1 + PROFILE = 2 + + class QueryOptions(proto.Message): + r"""Query optimizer configuration. + + Attributes: + optimizer_version (str): + An option to control the selection of optimizer version. + + This parameter allows individual queries to pick different + query optimizer versions. 
+ + Specifying "latest" as a value instructs Cloud Spanner to + use the latest supported query optimizer version. If not + specified, Cloud Spanner uses optimizer version set at the + database level options. Any other positive integer (from the + list of supported optimizer versions) overrides the default + optimizer version for query execution. The list of supported + optimizer versions can be queried from + SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL + statement with an invalid optimizer version will fail with a + syntax error (``INVALID_ARGUMENT``) status. + + The ``optimizer_version`` statement hint has precedence over + this setting. + """ + + optimizer_version = proto.Field(proto.STRING, number=1) + + session = proto.Field(proto.STRING, number=1) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector, + ) + + sql = proto.Field(proto.STRING, number=3) + + params = proto.Field(proto.MESSAGE, number=4, message=struct.Struct,) + + param_types = proto.MapField( + proto.STRING, proto.MESSAGE, number=5, message=gs_type.Type, + ) + + resume_token = proto.Field(proto.BYTES, number=6) + + query_mode = proto.Field(proto.ENUM, number=7, enum=QueryMode,) + + partition_token = proto.Field(proto.BYTES, number=8) + + seqno = proto.Field(proto.INT64, number=9) + + query_options = proto.Field(proto.MESSAGE, number=10, message=QueryOptions,) + + +class ExecuteBatchDmlRequest(proto.Message): + r"""The request for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + + Attributes: + session (str): + Required. The session in which the DML + statements should be performed. + transaction (~.gs_transaction.TransactionSelector): + Required. The transaction to use. Must be a + read-write transaction. + To protect against replays, single-use + transactions are not supported. The caller must + either supply an existing transaction ID or + begin a new transaction. + statements (Sequence[~.spanner.ExecuteBatchDmlRequest.Statement]): + Required. The list of statements to execute in this batch. + Statements are executed serially, such that the effects of + statement ``i`` are visible to statement ``i+1``. Each + statement must be a DML statement. Execution stops at the + first failed statement; the remaining statements are not + executed. + + Callers must provide at least one statement. + seqno (int): + Required. A per-transaction sequence number + used to identify this request. This field makes + each request idempotent such that if the request + is received multiple times, at most one will + succeed. + + The sequence number must be monotonically + increasing within the transaction. If a request + arrives for the first time with an out-of-order + sequence number, the transaction may be aborted. + Replays of previously handled requests will + yield the same response as the first execution. + """ + + class Statement(proto.Message): + r"""A single DML statement. + + Attributes: + sql (str): + Required. The DML string. + params (~.struct.Struct): + Parameter names and values that bind to placeholders in the + DML string. + + A parameter placeholder consists of the ``@`` character + followed by the parameter name (for example, + ``@firstName``). Parameter names can contain letters, + numbers, and underscores. + + Parameters can appear anywhere that a literal value is + expected. 
The same parameter name can be used more than + once, for example: + + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` + + It is an error to execute a SQL statement with unbound + parameters. + param_types (Sequence[~.spanner.ExecuteBatchDmlRequest.Statement.ParamTypesEntry]): + It is not always possible for Cloud Spanner to infer the + right SQL type from a JSON value. For example, values of + type ``BYTES`` and values of type ``STRING`` both appear in + [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] + as JSON strings. + + In these cases, ``param_types`` can be used to specify the + exact SQL type for some or all of the SQL statement + parameters. See the definition of + [Type][google.spanner.v1.Type] for more information about + SQL types. + """ + + sql = proto.Field(proto.STRING, number=1) + + params = proto.Field(proto.MESSAGE, number=2, message=struct.Struct,) + + param_types = proto.MapField( + proto.STRING, proto.MESSAGE, number=3, message=gs_type.Type, + ) + + session = proto.Field(proto.STRING, number=1) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector, + ) + + statements = proto.RepeatedField(proto.MESSAGE, number=3, message=Statement,) + + seqno = proto.Field(proto.INT64, number=4) + + +class ExecuteBatchDmlResponse(proto.Message): + r"""The response for + [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. + Contains a list of [ResultSet][google.spanner.v1.ResultSet] + messages, one for each DML statement that has successfully executed, + in the same order as the statements in the request. If a statement + fails, the status in the response body identifies the cause of the + failure. + + To check for DML statements that failed, use the following approach: + + 1. Check the status in the response message. The + [google.rpc.Code][google.rpc.Code] enum value ``OK`` indicates + that all statements were executed successfully. + 2. If the status was not ``OK``, check the number of result sets in + the response. If the response contains ``N`` + [ResultSet][google.spanner.v1.ResultSet] messages, then statement + ``N+1`` in the request failed. + + Example 1: + + - Request: 5 DML statements, all executed successfully. + - Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, + with the status ``OK``. + + Example 2: + + - Request: 5 DML statements. The third statement has a syntax + error. + - Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, + and a syntax error (``INVALID_ARGUMENT``) status. The number of + [ResultSet][google.spanner.v1.ResultSet] messages indicates that + the third statement failed, and the fourth and fifth statements + were not executed. + + Attributes: + result_sets (Sequence[~.result_set.ResultSet]): + One [ResultSet][google.spanner.v1.ResultSet] for each + statement in the request that ran successfully, in the same + order as the statements in the request. Each + [ResultSet][google.spanner.v1.ResultSet] does not contain + any rows. The + [ResultSetStats][google.spanner.v1.ResultSetStats] in each + [ResultSet][google.spanner.v1.ResultSet] contain the number + of rows modified by the statement. + + Only the first [ResultSet][google.spanner.v1.ResultSet] in + the response contains valid + [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. + status (~.gr_status.Status): + If all DML statements are executed successfully, the status + is ``OK``. Otherwise, the error status of the first failed + statement. 
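To make the parameter-binding rules concrete, a minimal sketch of a batched DML request built from the types in this file; the session name and the ``Messages`` table are placeholders:

::

    from google.protobuf import struct_pb2

    from google.cloud.spanner_v1.types import spanner
    from google.cloud.spanner_v1.types import transaction as gs_transaction
    from google.cloud.spanner_v1.types import type as gs_type

    params = struct_pb2.Struct()
    params.update({"msg_id": 100})  # binds to @msg_id in the SQL below

    request = spanner.ExecuteBatchDmlRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # placeholder
        # Batch DML requires a read-write transaction.
        transaction=gs_transaction.TransactionSelector(
            begin=gs_transaction.TransactionOptions(
                read_write=gs_transaction.TransactionOptions.ReadWrite()
            )
        ),
        statements=[
            spanner.ExecuteBatchDmlRequest.Statement(
                sql="DELETE FROM Messages "
                    "WHERE id > @msg_id AND id < @msg_id + 100",
                params=params,
                # JSON numbers are ambiguous, so pin the SQL type explicitly.
                param_types={"msg_id": gs_type.Type(code=gs_type.TypeCode.INT64)},
            )
        ],
        seqno=1,
    )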
+ """ + + result_sets = proto.RepeatedField( + proto.MESSAGE, number=1, message=result_set.ResultSet, + ) + + status = proto.Field(proto.MESSAGE, number=2, message=gr_status.Status,) + + +class PartitionOptions(proto.Message): + r"""Options for a PartitionQueryRequest and + PartitionReadRequest. + + Attributes: + partition_size_bytes (int): + **Note:** This hint is currently ignored by PartitionQuery + and PartitionRead requests. + + The desired data size for each partition generated. The + default for this option is currently 1 GiB. This is only a + hint. The actual size of each partition may be smaller or + larger than this size request. + max_partitions (int): + **Note:** This hint is currently ignored by PartitionQuery + and PartitionRead requests. + + The desired maximum number of partitions to return. For + example, this may be set to the number of workers available. + The default for this option is currently 10,000. The maximum + value is currently 200,000. This is only a hint. The actual + number of partitions returned may be smaller or larger than + this maximum count request. + """ + + partition_size_bytes = proto.Field(proto.INT64, number=1) + + max_partitions = proto.Field(proto.INT64, number=2) + + +class PartitionQueryRequest(proto.Message): + r"""The request for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] + + Attributes: + session (str): + Required. The session used to create the + partitions. + transaction (~.gs_transaction.TransactionSelector): + Read only snapshot transactions are + supported, read/write and single use + transactions are not. + sql (str): + Required. The query request to generate partitions for. The + request will fail if the query is not root partitionable. + The query plan of a root partitionable query has a single + distributed union operator. A distributed union operator + conceptually divides one or more tables into multiple + splits, remotely evaluates a subquery independently on each + split, and then unions all results. + + This must not contain DML commands, such as INSERT, UPDATE, + or DELETE. Use + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + with a PartitionedDml transaction for large, + partition-friendly DML operations. + params (~.struct.Struct): + Parameter names and values that bind to placeholders in the + SQL string. + + A parameter placeholder consists of the ``@`` character + followed by the parameter name (for example, + ``@firstName``). Parameter names can contain letters, + numbers, and underscores. + + Parameters can appear anywhere that a literal value is + expected. The same parameter name can be used more than + once, for example: + + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` + + It is an error to execute a SQL statement with unbound + parameters. + param_types (Sequence[~.spanner.PartitionQueryRequest.ParamTypesEntry]): + It is not always possible for Cloud Spanner to infer the + right SQL type from a JSON value. For example, values of + type ``BYTES`` and values of type ``STRING`` both appear in + [params][google.spanner.v1.PartitionQueryRequest.params] as + JSON strings. + + In these cases, ``param_types`` can be used to specify the + exact SQL type for some or all of the SQL query parameters. + See the definition of [Type][google.spanner.v1.Type] for + more information about SQL types. + partition_options (~.spanner.PartitionOptions): + Additional options that affect how many + partitions are created. 
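A minimal sketch of generating query partitions, assuming the types above; the session name is a placeholder, and the query reuses the ``Users`` example from the result-set docs:

::

    from google.cloud.spanner_v1.types import spanner
    from google.cloud.spanner_v1.types import transaction as gs_transaction

    request = spanner.PartitionQueryRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # placeholder
        # Partitioning requires a read-only snapshot transaction; the
        # created transaction comes back in PartitionResponse.transaction.
        transaction=gs_transaction.TransactionSelector(
            begin=gs_transaction.TransactionOptions(
                read_only=gs_transaction.TransactionOptions.ReadOnly(strong=True)
            )
        ),
        sql="SELECT UserId, UserName FROM Users",  # must be root-partitionable
        partition_options=spanner.PartitionOptions(max_partitions=100),
    )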
+ """ + + session = proto.Field(proto.STRING, number=1) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector, + ) + + sql = proto.Field(proto.STRING, number=3) + + params = proto.Field(proto.MESSAGE, number=4, message=struct.Struct,) + + param_types = proto.MapField( + proto.STRING, proto.MESSAGE, number=5, message=gs_type.Type, + ) + + partition_options = proto.Field( + proto.MESSAGE, number=6, message="PartitionOptions", + ) + + +class PartitionReadRequest(proto.Message): + r"""The request for + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + Attributes: + session (str): + Required. The session used to create the + partitions. + transaction (~.gs_transaction.TransactionSelector): + Read only snapshot transactions are + supported, read/write and single use + transactions are not. + table (str): + Required. The name of the table in the + database to be read. + index (str): + If non-empty, the name of an index on + [table][google.spanner.v1.PartitionReadRequest.table]. This + index is used instead of the table primary key when + interpreting + [key_set][google.spanner.v1.PartitionReadRequest.key_set] + and sorting result rows. See + [key_set][google.spanner.v1.PartitionReadRequest.key_set] + for further information. + columns (Sequence[str]): + The columns of + [table][google.spanner.v1.PartitionReadRequest.table] to be + returned for each row matching this request. + key_set (~.keys.KeySet): + Required. ``key_set`` identifies the rows to be yielded. + ``key_set`` names the primary keys of the rows in + [table][google.spanner.v1.PartitionReadRequest.table] to be + yielded, unless + [index][google.spanner.v1.PartitionReadRequest.index] is + present. If + [index][google.spanner.v1.PartitionReadRequest.index] is + present, then + [key_set][google.spanner.v1.PartitionReadRequest.key_set] + instead names index keys in + [index][google.spanner.v1.PartitionReadRequest.index]. + + It is not an error for the ``key_set`` to name rows that do + not exist in the database. Read yields nothing for + nonexistent rows. + partition_options (~.spanner.PartitionOptions): + Additional options that affect how many + partitions are created. + """ + + session = proto.Field(proto.STRING, number=1) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector, + ) + + table = proto.Field(proto.STRING, number=3) + + index = proto.Field(proto.STRING, number=4) + + columns = proto.RepeatedField(proto.STRING, number=5) + + key_set = proto.Field(proto.MESSAGE, number=6, message=keys.KeySet,) + + partition_options = proto.Field( + proto.MESSAGE, number=9, message="PartitionOptions", + ) + + +class Partition(proto.Message): + r"""Information returned for each partition returned in a + PartitionResponse. + + Attributes: + partition_token (bytes): + This token can be passed to Read, + StreamingRead, ExecuteSql, or + ExecuteStreamingSql requests to restrict the + results to those identified by this partition + token. + """ + + partition_token = proto.Field(proto.BYTES, number=1) + + +class PartitionResponse(proto.Message): + r"""The response for + [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or + [PartitionRead][google.spanner.v1.Spanner.PartitionRead] + + Attributes: + partitions (Sequence[~.spanner.Partition]): + Partitions created by this request. + transaction (~.gs_transaction.Transaction): + Transaction created by this request. 
+ """ + + partitions = proto.RepeatedField(proto.MESSAGE, number=1, message="Partition",) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.Transaction, + ) + + +class ReadRequest(proto.Message): + r"""The request for [Read][google.spanner.v1.Spanner.Read] and + [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. + + Attributes: + session (str): + Required. The session in which the read + should be performed. + transaction (~.gs_transaction.TransactionSelector): + The transaction to use. If none is provided, + the default is a temporary read-only transaction + with strong concurrency. + table (str): + Required. The name of the table in the + database to be read. + index (str): + If non-empty, the name of an index on + [table][google.spanner.v1.ReadRequest.table]. This index is + used instead of the table primary key when interpreting + [key_set][google.spanner.v1.ReadRequest.key_set] and sorting + result rows. See + [key_set][google.spanner.v1.ReadRequest.key_set] for further + information. + columns (Sequence[str]): + Required. The columns of + [table][google.spanner.v1.ReadRequest.table] to be returned + for each row matching this request. + key_set (~.keys.KeySet): + Required. ``key_set`` identifies the rows to be yielded. + ``key_set`` names the primary keys of the rows in + [table][google.spanner.v1.ReadRequest.table] to be yielded, + unless [index][google.spanner.v1.ReadRequest.index] is + present. If [index][google.spanner.v1.ReadRequest.index] is + present, then + [key_set][google.spanner.v1.ReadRequest.key_set] instead + names index keys in + [index][google.spanner.v1.ReadRequest.index]. + + If the + [partition_token][google.spanner.v1.ReadRequest.partition_token] + field is empty, rows are yielded in table primary key order + (if [index][google.spanner.v1.ReadRequest.index] is empty) + or index key order (if + [index][google.spanner.v1.ReadRequest.index] is non-empty). + If the + [partition_token][google.spanner.v1.ReadRequest.partition_token] + field is not empty, rows will be yielded in an unspecified + order. + + It is not an error for the ``key_set`` to name rows that do + not exist in the database. Read yields nothing for + nonexistent rows. + limit (int): + If greater than zero, only the first ``limit`` rows are + yielded. If ``limit`` is zero, the default is no limit. A + limit cannot be specified if ``partition_token`` is set. + resume_token (bytes): + If this request is resuming a previously interrupted read, + ``resume_token`` should be copied from the last + [PartialResultSet][google.spanner.v1.PartialResultSet] + yielded before the interruption. Doing this enables the new + read to resume where the last read left off. The rest of the + request parameters must exactly match the request that + yielded this token. + partition_token (bytes): + If present, results will be restricted to the specified + partition previously created using PartitionRead(). There + must be an exact match for the values of fields common to + this message and the PartitionReadRequest message used to + create this partition_token. 
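A minimal sketch of a full-table read using the attributes just described; the session name is a placeholder, and ``UserEvents`` is the table from the ``KeyRange`` example:

::

    from google.cloud.spanner_v1.types import keys, spanner

    request = spanner.ReadRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # placeholder
        table="UserEvents",
        columns=["UserName", "EventDate"],
        # Match every row; with no transaction selector set, a temporary
        # strong read-only transaction is used.
        key_set=keys.KeySet(all_=True),
        limit=10,  # cannot be combined with partition_token
    )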
+ """ + + session = proto.Field(proto.STRING, number=1) + + transaction = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector, + ) + + table = proto.Field(proto.STRING, number=3) + + index = proto.Field(proto.STRING, number=4) + + columns = proto.RepeatedField(proto.STRING, number=5) + + key_set = proto.Field(proto.MESSAGE, number=6, message=keys.KeySet,) + + limit = proto.Field(proto.INT64, number=8) + + resume_token = proto.Field(proto.BYTES, number=9) + + partition_token = proto.Field(proto.BYTES, number=10) + + +class BeginTransactionRequest(proto.Message): + r"""The request for + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + + Attributes: + session (str): + Required. The session in which the + transaction runs. + options (~.gs_transaction.TransactionOptions): + Required. Options for the new transaction. + """ + + session = proto.Field(proto.STRING, number=1) + + options = proto.Field( + proto.MESSAGE, number=2, message=gs_transaction.TransactionOptions, + ) + + +class CommitRequest(proto.Message): + r"""The request for [Commit][google.spanner.v1.Spanner.Commit]. + + Attributes: + session (str): + Required. The session in which the + transaction to be committed is running. + transaction_id (bytes): + Commit a previously-started transaction. + single_use_transaction (~.gs_transaction.TransactionOptions): + Execute mutations in a temporary transaction. Note that + unlike commit of a previously-started transaction, commit + with a temporary transaction is non-idempotent. That is, if + the ``CommitRequest`` is sent to Cloud Spanner more than + once (for instance, due to retries in the application, or in + the transport library), it is possible that the mutations + are executed more than once. If this is undesirable, use + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] + and [Commit][google.spanner.v1.Spanner.Commit] instead. + mutations (Sequence[~.mutation.Mutation]): + The mutations to be executed when this + transaction commits. All mutations are applied + atomically, in the order they appear in this + list. + """ + + session = proto.Field(proto.STRING, number=1) + + transaction_id = proto.Field(proto.BYTES, number=2, oneof="transaction") + + single_use_transaction = proto.Field( + proto.MESSAGE, + number=3, + oneof="transaction", + message=gs_transaction.TransactionOptions, + ) + + mutations = proto.RepeatedField(proto.MESSAGE, number=4, message=mutation.Mutation,) + + +class CommitResponse(proto.Message): + r"""The response for [Commit][google.spanner.v1.Spanner.Commit]. + + Attributes: + commit_timestamp (~.timestamp.Timestamp): + The Cloud Spanner timestamp at which the + transaction committed. + """ + + commit_timestamp = proto.Field( + proto.MESSAGE, number=1, message=timestamp.Timestamp, + ) + + +class RollbackRequest(proto.Message): + r"""The request for [Rollback][google.spanner.v1.Spanner.Rollback]. + + Attributes: + session (str): + Required. The session in which the + transaction to roll back is running. + transaction_id (bytes): + Required. The transaction to roll back. 
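A minimal sketch of a single-use commit carrying one mutation, built from the types above; the session name and table are placeholders:

::

    from google.cloud.spanner_v1.types import keys, mutation, spanner
    from google.cloud.spanner_v1.types import transaction as gs_transaction

    request = spanner.CommitRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # placeholder
        # Temporary transaction: one round trip, but non-idempotent if the
        # commit is retried by the application or transport.
        single_use_transaction=gs_transaction.TransactionOptions(
            read_write=gs_transaction.TransactionOptions.ReadWrite()
        ),
        mutations=[
            mutation.Mutation(
                delete=mutation.Mutation.Delete(
                    table="UserEvents",
                    key_set=keys.KeySet(all_=True),
                )
            )
        ],
    )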
+ """ + + session = proto.Field(proto.STRING, number=1) + + transaction_id = proto.Field(proto.BYTES, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py new file mode 100644 index 0000000000..7b50f228e5 --- /dev/null +++ b/google/cloud/spanner_v1/types/transaction.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", + manifest={"TransactionOptions", "Transaction", "TransactionSelector",}, +) + + +class TransactionOptions(proto.Message): + r"""TransactionOptions are used to specify different types of transactions. + + For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction + + Attributes: + read_write (~.transaction.TransactionOptions.ReadWrite): + Transaction may write. + + Authorization to begin a read-write transaction requires + ``spanner.databases.beginOrRollbackReadWriteTransaction`` + permission on the ``session`` resource. + partitioned_dml (~.transaction.TransactionOptions.PartitionedDml): + Partitioned DML transaction. + + Authorization to begin a Partitioned DML transaction + requires + ``spanner.databases.beginPartitionedDmlTransaction`` + permission on the ``session`` resource. + read_only (~.transaction.TransactionOptions.ReadOnly): + Transaction will not write. + + Authorization to begin a read-only transaction requires + ``spanner.databases.beginReadOnlyTransaction`` permission on + the ``session`` resource. + """ + + class ReadWrite(proto.Message): + r"""Message type to initiate a read-write transaction. Currently + this transaction type has no options. + """ + + class PartitionedDml(proto.Message): + r"""Message type to initiate a Partitioned DML transaction.""" + + class ReadOnly(proto.Message): + r"""Message type to initiate a read-only transaction. + + Attributes: + strong (bool): + Read at a timestamp where all previously + committed transactions are visible. + min_read_timestamp (~.timestamp.Timestamp): + Executes all reads at a timestamp >= ``min_read_timestamp``. + + This is useful for requesting fresher data than some + previous read, or data that is fresh enough to observe the + effects of some previously committed transaction whose + timestamp is known. + + Note that this option can only be used in single-use + transactions. + + A timestamp in RFC3339 UTC "Zulu" format, accurate to + nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. + max_staleness (~.duration.Duration): + Read data at a timestamp >= ``NOW - max_staleness`` seconds. + Guarantees that all writes that have committed more than the + specified number of seconds ago are visible. 
Because Cloud + Spanner chooses the exact timestamp, this mode works even if + the client's local clock is substantially skewed from Cloud + Spanner commit timestamps. + + Useful for reading the freshest data available at a nearby + replica, while bounding the possible staleness if the local + replica has fallen behind. + + Note that this option can only be used in single-use + transactions. + read_timestamp (~.timestamp.Timestamp): + Executes all reads at the given timestamp. Unlike other + modes, reads at a specific timestamp are repeatable; the + same read at the same timestamp always returns the same + data. If the timestamp is in the future, the read will block + until the specified timestamp, modulo the read's deadline. + + Useful for large scale consistent reads such as mapreduces, + or for coordinating many reads against a consistent snapshot + of the data. + + A timestamp in RFC3339 UTC "Zulu" format, accurate to + nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. + exact_staleness (~.duration.Duration): + Executes all reads at a timestamp that is + ``exact_staleness`` old. The timestamp is chosen soon after + the read is started. + + Guarantees that all writes that have committed more than the + specified number of seconds ago are visible. Because Cloud + Spanner chooses the exact timestamp, this mode works even if + the client's local clock is substantially skewed from Cloud + Spanner commit timestamps. + + Useful for reading at nearby replicas without the + distributed timestamp negotiation overhead of + ``max_staleness``. + return_read_timestamp (bool): + If true, the Cloud Spanner-selected read timestamp is + included in the [Transaction][google.spanner.v1.Transaction] + message that describes the transaction. + """ + + strong = proto.Field(proto.BOOL, number=1, oneof="timestamp_bound") + + min_read_timestamp = proto.Field( + proto.MESSAGE, + number=2, + oneof="timestamp_bound", + message=timestamp.Timestamp, + ) + + max_staleness = proto.Field( + proto.MESSAGE, number=3, oneof="timestamp_bound", message=duration.Duration, + ) + + read_timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="timestamp_bound", + message=timestamp.Timestamp, + ) + + exact_staleness = proto.Field( + proto.MESSAGE, number=5, oneof="timestamp_bound", message=duration.Duration, + ) + + return_read_timestamp = proto.Field(proto.BOOL, number=6) + + read_write = proto.Field(proto.MESSAGE, number=1, oneof="mode", message=ReadWrite,) + + partitioned_dml = proto.Field( + proto.MESSAGE, number=3, oneof="mode", message=PartitionedDml, + ) + + read_only = proto.Field(proto.MESSAGE, number=2, oneof="mode", message=ReadOnly,) + + +class Transaction(proto.Message): + r"""A transaction. + + Attributes: + id (bytes): + ``id`` may be used to identify the transaction in subsequent + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], + [Commit][google.spanner.v1.Spanner.Commit], or + [Rollback][google.spanner.v1.Spanner.Rollback] calls. + + Single-use read-only transactions do not have IDs, because + single-use transactions do not support multiple requests. + read_timestamp (~.timestamp.Timestamp): + For snapshot read-only transactions, the read timestamp + chosen for the transaction. Not returned by default: see + [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp]. + + A timestamp in RFC3339 UTC "Zulu" format, accurate to + nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. 
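To contrast the timestamp bounds described above, a minimal sketch of three read-only configurations (strong, bounded staleness, exact staleness), assuming the types in this file:

::

    from google.protobuf import duration_pb2

    from google.cloud.spanner_v1.types import transaction as gs_transaction

    ReadOnly = gs_transaction.TransactionOptions.ReadOnly

    # Strong: see all previously committed transactions.
    strong = gs_transaction.TransactionOptions(read_only=ReadOnly(strong=True))

    # Bounded staleness (single-use only): at most 15 seconds stale.
    bounded = gs_transaction.TransactionOptions(
        read_only=ReadOnly(max_staleness=duration_pb2.Duration(seconds=15))
    )

    # Exact staleness: repeatable reads at a fixed point in the past,
    # with the chosen timestamp reported back on the Transaction message.
    exact = gs_transaction.TransactionOptions(
        read_only=ReadOnly(
            exact_staleness=duration_pb2.Duration(seconds=60),
            return_read_timestamp=True,
        )
    )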
+ """ + + id = proto.Field(proto.BYTES, number=1) + + read_timestamp = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + +class TransactionSelector(proto.Message): + r"""This message is used to select the transaction in which a + [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. + + See [TransactionOptions][google.spanner.v1.TransactionOptions] for + more information about transactions. + + Attributes: + single_use (~.transaction.TransactionOptions): + Execute the read or SQL query in a temporary + transaction. This is the most efficient way to + execute a transaction that consists of a single + SQL query. + id (bytes): + Execute the read or SQL query in a + previously-started transaction. + begin (~.transaction.TransactionOptions): + Begin a new transaction and execute this read or SQL query + in it. The transaction ID of the new transaction is returned + in + [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction], + which is a [Transaction][google.spanner.v1.Transaction]. + """ + + single_use = proto.Field( + proto.MESSAGE, number=1, oneof="selector", message="TransactionOptions", + ) + + id = proto.Field(proto.BYTES, number=2, oneof="selector") + + begin = proto.Field( + proto.MESSAGE, number=3, oneof="selector", message="TransactionOptions", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py new file mode 100644 index 0000000000..19a0ffe5be --- /dev/null +++ b/google/cloud/spanner_v1/types/type.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.spanner.v1", manifest={"TypeCode", "Type", "StructType",}, +) + + +class TypeCode(proto.Enum): + r"""``TypeCode`` is used as part of [Type][google.spanner.v1.Type] to + indicate the type of a Cloud Spanner value. + + Each legal value of a type can be encoded to or decoded from a JSON + value, using the encodings described below. All Cloud Spanner values + can be ``null``, regardless of type; ``null``\ s are always encoded + as a JSON ``null``. + """ + TYPE_CODE_UNSPECIFIED = 0 + BOOL = 1 + INT64 = 2 + FLOAT64 = 3 + TIMESTAMP = 4 + DATE = 5 + STRING = 6 + BYTES = 7 + ARRAY = 8 + STRUCT = 9 + NUMERIC = 10 + + +class Type(proto.Message): + r"""``Type`` indicates the type of a Cloud Spanner value, as might be + stored in a table cell or returned from an SQL query. + + Attributes: + code (~.gs_type.TypeCode): + Required. The [TypeCode][google.spanner.v1.TypeCode] for + this type. + array_element_type (~.gs_type.Type): + If [code][google.spanner.v1.Type.code] == + [ARRAY][google.spanner.v1.TypeCode.ARRAY], then + ``array_element_type`` is the type of the array elements. 
+ struct_type (~.gs_type.StructType): + If [code][google.spanner.v1.Type.code] == + [STRUCT][google.spanner.v1.TypeCode.STRUCT], then + ``struct_type`` provides type information for the struct's + fields. + """ + + code = proto.Field(proto.ENUM, number=1, enum="TypeCode",) + + array_element_type = proto.Field(proto.MESSAGE, number=2, message="Type",) + + struct_type = proto.Field(proto.MESSAGE, number=3, message="StructType",) + + +class StructType(proto.Message): + r"""``StructType`` defines the fields of a + [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. + + Attributes: + fields (Sequence[~.gs_type.StructType.Field]): + The list of fields that make up this struct. Order is + significant, because values of this struct type are + represented as lists, where the order of field values + matches the order of fields in the + [StructType][google.spanner.v1.StructType]. In turn, the + order of fields matches the order of columns in a read + request, or the order of fields in the ``SELECT`` clause of + a query. + """ + + class Field(proto.Message): + r"""Message representing a single field of a struct. + + Attributes: + name (str): + The name of the field. For reads, this is the column name. + For SQL queries, it is the column alias (e.g., ``"Word"`` in + the query ``"SELECT 'hello' AS Word"``), or the column name + (e.g., ``"ColName"`` in the query + ``"SELECT ColName FROM Table"``). Some columns might have an + empty name (e.g., `"SELECT UPPER(ColName)"`). Note that a + query result can contain multiple fields with the same name. + type_ (~.gs_type.Type): + The type of the field. + """ + + name = proto.Field(proto.STRING, number=1) + + type_ = proto.Field(proto.MESSAGE, number=2, message="Type",) + + fields = proto.RepeatedField(proto.MESSAGE, number=1, message=Field,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index cdd18ff886..1a6227824a 100644 --- a/noxfile.py +++ b/noxfile.py @@ -23,14 +23,15 @@ import nox -BLACK_VERSION = "black==19.3b0" +BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -if os.path.exists("samples"): - BLACK_PATHS.append("samples") +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -38,7 +39,9 @@ def lint(session): serious code quality issues. """ session.install("flake8", BLACK_VERSION) - session.run("black", "--check", *BLACK_PATHS) + session.run( + "black", "--check", *BLACK_PATHS, + ) session.run("flake8", "google", "tests") @@ -53,10 +56,12 @@ def blacken(session): check the state of the `gcp_ubuntu_config` we use for that Kokoro run. """ session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) + session.run( + "black", *BLACK_PATHS, + ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -65,17 +70,33 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. 
- session.install("mock", "pytest", "pytest-cov") + session.install("asyncmock", "pytest-asyncio") - if session.python != "2.7": - session.install("-e", ".[tracing]") - else: - session.install("-e", ".") + session.install("mock", "pytest", "pytest-cov") + session.install("-e", ".") # Run py.test against the unit tests. session.run( "py.test", "--quiet", + "--cov=google.cloud.spanner", + "--cov=google.cloud", + "--cov=tests.unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "unit"), + *session.posargs, + ) + + session.install("-e", ".[tracing]") + + # Run py.test against the unit tests with OpenTelemetry. + session.run( + "py.test", + "--quiet", + "--cov=google.cloud.spanner", "--cov=google.cloud", "--cov=tests.unit", "--cov-append", @@ -87,18 +108,22 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") - # Sanity check: Only run tests if either credentials or emulator host is set. + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") + # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( "SPANNER_EMULATOR_HOST", "" ): @@ -117,13 +142,10 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install("mock", "pytest") - - if session.python != "2.7": - session.install("-e", ".[tracing]") - else: - session.install("-e", ".") - session.install("-e", "test_utils/") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) + session.install("-e", ".[tracing]") # Run py.test against the system tests. if system_test_exists: @@ -132,7 +154,7 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def cover(session): """Run the final coverage report. @@ -145,12 +167,12 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("-e", ".[tracing]") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( @@ -167,12 +189,14 @@ def docs(session): ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docfx(session): """Build the docfx yaml files for this library.""" - session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml") + session.install("-e", ".[tracing]") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. 
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/samples/samples/README.rst b/samples/samples/README.rst index 143402fde5..b0573c249b 100644 --- a/samples/samples/README.rst +++ b/samples/samples/README.rst @@ -1,3 +1,4 @@ + .. This file is automatically generated. Do not edit this file directly. Google Cloud Spanner Python Samples @@ -14,10 +15,12 @@ This directory contains samples for Google Cloud Spanner. `Google Cloud Spanner` .. _Google Cloud Spanner: https://cloud.google.com/spanner/docs + Setup ------------------------------------------------------------------------------- + Authentication ++++++++++++++ @@ -28,6 +31,9 @@ credentials for applications. .. _Authentication Getting Started Guide: https://cloud.google.com/docs/authentication/getting-started + + + Install Dependencies ++++++++++++++++++++ @@ -42,7 +48,7 @@ Install Dependencies .. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. +#. Create a virtualenv. Samples are compatible with Python 3.6+. .. code-block:: bash @@ -58,9 +64,15 @@ Install Dependencies .. _pip: https://pip.pypa.io/ .. _virtualenv: https://virtualenv.pypa.io/ + + + + + Samples ------------------------------------------------------------------------------- + Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -76,32 +88,10 @@ To run this sample: $ python snippets.py + usage: snippets.py [-h] [--database-id DATABASE_ID] instance_id - {create_database,insert_data,query_data,read_data, - read_stale_data,add_column,update_data, - query_data_with_new_column,read_write_transaction, - read_only_transaction,add_index,query_data_with_index, - read_data_with_index,add_storing_index, - read_data_with_storing_index, - create_table_with_timestamp,insert_data_with_timestamp, - add_timestamp_column,update_data_with_timestamp, - query_data_with_timestamp,write_struct_data, - query_with_struct,query_with_array_of_struct, - query_struct_field,query_nested_struct_field, - insert_data_with_dml,update_data_with_dml, - delete_data_with_dml,update_data_with_dml_timestamp, - dml_write_read_transaction,update_data_with_dml_struct, - insert_with_dml,query_data_with_parameter, - write_with_dml_transaction, - update_data_with_partitioned_dml, - delete_data_with_partitioned_dml,update_with_batch_dml, - create_table_with_datatypes,insert_datatypes_data, - query_data_with_array,query_data_with_bool, - query_data_with_bytes,query_data_with_date, - query_data_with_float,query_data_with_int, - query_data_with_string, - query_data_with_timestamp_parameter} + 
{create_instance,create_database,insert_data,delete_data,query_data,read_data,read_stale_data,add_column,update_data,query_data_with_new_column,read_write_transaction,read_only_transaction,add_index,query_data_with_index,read_data_with_index,add_storing_index,read_data_with_storing_index,create_table_with_timestamp,insert_data_with_timestamp,add_timestamp_column,update_data_with_timestamp,query_data_with_timestamp,write_struct_data,query_with_struct,query_with_array_of_struct,query_struct_field,query_nested_struct_field,insert_data_with_dml,update_data_with_dml,delete_data_with_dml,update_data_with_dml_timestamp,dml_write_read_transaction,update_data_with_dml_struct,insert_with_dml,query_data_with_parameter,write_with_dml_transaction,update_data_with_partitioned_dml,delete_data_with_partitioned_dml,update_with_batch_dml,create_table_with_datatypes,insert_datatypes_data,query_data_with_array,query_data_with_bool,query_data_with_bytes,query_data_with_date,query_data_with_float,query_data_with_int,query_data_with_string,query_data_with_timestamp_parameter,query_data_with_query_options,create_client_with_query_options} ... This application demonstrates how to do basic operations using Cloud @@ -111,32 +101,15 @@ To run this sample: positional arguments: instance_id Your Cloud Spanner instance ID. - {create_database, insert_data, delete_data, query_data, read_data, - read_stale_data, add_column, update_data, query_data_with_new_column, - read_write_transaction, read_only_transaction, add_index, - query_data_with_index, read_data_with_index, add_storing_index, - read_data_with_storing_index, create_table_with_timestamp, - insert_data_with_timestamp, add_timestamp_column, - update_data_with_timestamp, query_data_with_timestamp, - write_struct_data, query_with_struct, query_with_array_of_struct, - query_struct_field, query_nested_struct_field, insert_data_with_dml, - update_data_with_dml, delete_data_with_dml, - update_data_with_dml_timestamp, dml_write_read_transaction, - update_data_with_dml_struct, insert_with_dml, query_data_with_parameter, - write_with_dml_transaction, update_data_with_partitioned_dml, - delete_data_with_partitioned_dml, update_with_batch_dml, - create_table_with_datatypes, insert_datatypes_data, - query_data_with_array, query_data_with_bool, query_data_with_bytes, - query_data_with_date, query_data_with_float, query_data_with_int, - query_data_with_string, query_data_with_timestamp_parameter} + 
{create_instance,create_database,insert_data,delete_data,query_data,read_data,read_stale_data,add_column,update_data,query_data_with_new_column,read_write_transaction,read_only_transaction,add_index,query_data_with_index,read_data_with_index,add_storing_index,read_data_with_storing_index,create_table_with_timestamp,insert_data_with_timestamp,add_timestamp_column,update_data_with_timestamp,query_data_with_timestamp,write_struct_data,query_with_struct,query_with_array_of_struct,query_struct_field,query_nested_struct_field,insert_data_with_dml,update_data_with_dml,delete_data_with_dml,update_data_with_dml_timestamp,dml_write_read_transaction,update_data_with_dml_struct,insert_with_dml,query_data_with_parameter,write_with_dml_transaction,update_data_with_partitioned_dml,delete_data_with_partitioned_dml,update_with_batch_dml,create_table_with_datatypes,insert_datatypes_data,query_data_with_array,query_data_with_bool,query_data_with_bytes,query_data_with_date,query_data_with_float,query_data_with_int,query_data_with_string,query_data_with_timestamp_parameter,query_data_with_query_options,create_client_with_query_options} + create_instance Creates an instance. create_database Creates a database and tables for sample data. insert_data Inserts sample data into the given database. The database and table must already exist and can be created using `create_database`. delete_data Deletes sample data from the given database. The - database, table, and data must already exist and - can be created using `create_database` and - `insert_data`. + database, table, and data must already exist and can + be created using `create_database` and `insert_data`. query_data Queries sample data from the database using SQL. read_data Reads sample data from the database. read_stale_data Reads sample data from the database. The data is @@ -237,59 +210,53 @@ To run this sample: Deletes sample data from the database using a DML statement. update_data_with_dml_timestamp - Updates data with Timestamp from the database using - a DML statement. + Updates data with Timestamp from the database using a + DML statement. dml_write_read_transaction First inserts data then reads it from within a transaction using DML. update_data_with_dml_struct Updates data with a DML statement and STRUCT parameters. - insert_with_dml Inserts data with a DML statement into the - database. + insert_with_dml Inserts data with a DML statement into the database. query_data_with_parameter - Queries sample data from the database using SQL - with a parameter. + Queries sample data from the database using SQL with a + parameter. write_with_dml_transaction - Transfers part of a marketing budget from one - album to another. + Transfers part of a marketing budget from one album to + another. update_data_with_partitioned_dml - Update sample data with a partitioned DML - statement. + Update sample data with a partitioned DML statement. delete_data_with_partitioned_dml - Delete sample data with a partitioned DML - statement. + Delete sample data with a partitioned DML statement. update_with_batch_dml - Updates sample data in the database using Batch - DML. + Updates sample data in the database using Batch DML. create_table_with_datatypes Creates a table with supported dataypes. insert_datatypes_data Inserts data with supported datatypes into a table. query_data_with_array - Queries sample data using SQL with an ARRAY - parameter. + Queries sample data using SQL with an ARRAY parameter. query_data_with_bool - Queries sample data using SQL with a BOOL - parameter. 
+ Queries sample data using SQL with a BOOL parameter. query_data_with_bytes - Queries sample data using SQL with a BYTES - parameter. + Queries sample data using SQL with a BYTES parameter. query_data_with_date - Queries sample data using SQL with a DATE - parameter. + Queries sample data using SQL with a DATE parameter. query_data_with_float Queries sample data using SQL with a FLOAT64 parameter. query_data_with_int - Queries sample data using SQL with a INT64 - parameter. + Queries sample data using SQL with a INT64 parameter. query_data_with_string - Queries sample data using SQL with a STRING - parameter. + Queries sample data using SQL with a STRING parameter. query_data_with_timestamp_parameter Queries sample data using SQL with a TIMESTAMP parameter. + query_data_with_query_options + Queries sample data using SQL with query options. + create_client_with_query_options + Create a client with query options. optional arguments: -h, --help show this help message and exit @@ -300,6 +267,10 @@ To run this sample: + + + + The client library ------------------------------------------------------------------------------- @@ -315,4 +286,5 @@ to `browse the source`_ and `report issues`_. https://github.com/GoogleCloudPlatform/google-cloud-python/issues -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/samples/samples/backup_sample.py b/samples/samples/backup_sample.py index 76f04cb85c..29492c5872 100644 --- a/samples/samples/backup_sample.py +++ b/samples/samples/backup_sample.py @@ -216,9 +216,10 @@ def list_backups(instance_id, database_id, backup_id): print(backup.name) print("All backups with pagination") - for page in instance.list_backups(page_size=2).pages: - for backup in page: - print(backup.name) + # If there are multiple pages, additional ``ListBackup`` + # requests will be made as needed while iterating. + for backup in instance.list_backups(page_size=2): + print(backup.name) # [END spanner_list_backups] diff --git a/samples/samples/noxfile.py b/samples/samples/noxfile.py index 5660f08be4..01686e4a03 100644 --- a/samples/samples/noxfile.py +++ b/samples/samples/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. 
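+    # (Values supplied via TEST_CONFIG['envs'] take precedence over the
+    # defaults computed above, because dict.update overwrites existing keys.)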
- ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # @@ -199,6 +201,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") diff --git a/samples/samples/quickstart_test.py b/samples/samples/quickstart_test.py index d5c8d04160..9b9cbf5cc8 100644 --- a/samples/samples/quickstart_test.py +++ b/samples/samples/quickstart_test.py @@ -31,7 +31,7 @@ def new_instance(self, unused_instance_name): return original_instance(self, SPANNER_INSTANCE) instance_patch = mock.patch( - "google.cloud.spanner.Client.instance", side_effect=new_instance, autospec=True + "google.cloud.spanner_v1.Client.instance", side_effect=new_instance, autospec=True ) with instance_patch: diff --git a/samples/samples/snippets.py b/samples/samples/snippets.py index 4a47985031..f0379c0210 100644 --- a/samples/samples/snippets.py +++ b/samples/samples/snippets.py @@ -296,16 +296,14 @@ def query_data_with_index( ALTER TABLE Albums ADD COLUMN MarketingBudget INT64 """ - from google.cloud.spanner_v1.proto import type_pb2 - spanner_client = spanner.Client() instance = spanner_client.instance(instance_id) database = instance.database(database_id) params = {"start_title": start_title, "end_title": end_title} param_types = { - "start_title": type_pb2.Type(code=type_pb2.STRING), - "end_title": type_pb2.Type(code=type_pb2.STRING), + "start_title": spanner.param_types.STRING, + "end_title": spanner.param_types.STRING, } with database.snapshot() as snapshot: diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index ff599eb2af..21f6d2a26d 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
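+# ("${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" expands to the
+# variable's value when it is set and non-empty, and to the fallback project
+# otherwise.)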
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/scripts/fixup_spanner_admin_database_v1_keywords.py b/scripts/fixup_spanner_admin_database_v1_keywords.py new file mode 100644 index 0000000000..9f1a9bb9f1 --- /dev/null +++ b/scripts/fixup_spanner_admin_database_v1_keywords.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class spanner_admin_databaseCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_database': ('parent', 'create_statement', 'extra_statements', ), + 'delete_backup': ('name', ), + 'drop_database': ('database', ), + 'get_backup': ('name', ), + 'get_database': ('name', ), + 'get_database_ddl': ('database', ), + 'get_iam_policy': ('resource', 'options', ), + 'list_backup_operations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_database_operations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_databases': ('parent', 'page_size', 'page_token', ), + 'restore_database': ('parent', 'database_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_database_ddl': ('database', 'statements', 'operation_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
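+        # ``partition`` returns (true_list, false_list): here ``args`` receives
+        # the positional arguments and ``kwargs`` the keyword arguments.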
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=spanner_admin_databaseCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the spanner_admin_database client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
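+
+Example of the rewrite this tool performs (variable names are illustrative):
+
+    client.create_backup(parent, backup_id, backup)
+
+  becomes
+
+    client.create_backup(request={'parent': parent, 'backup_id': backup_id, 'backup': backup})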
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_spanner_admin_instance_v1_keywords.py b/scripts/fixup_spanner_admin_instance_v1_keywords.py new file mode 100644 index 0000000000..0871592c96 --- /dev/null +++ b/scripts/fixup_spanner_admin_instance_v1_keywords.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class spanner_admin_instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_instance': ('parent', 'instance_id', 'instance', ), + 'delete_instance': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', 'field_mask', ), + 'get_instance_config': ('name', ), + 'list_instance_configs': ('parent', 'page_size', 'page_token', ), + 'list_instances': ('parent', 'page_size', 'page_token', 'filter', ), + 'set_iam_policy': ('resource', 'policy', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_instance': ('instance', 'field_mask', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=spanner_admin_instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the spanner_admin_instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py new file mode 100644 index 0000000000..7c83aaf33d --- /dev/null +++ b/scripts/fixup_spanner_v1_keywords.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class spannerCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_create_sessions': ('database', 'session_count', 'session_template', ), + 'begin_transaction': ('session', 'options', ), + 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', ), + 'create_session': ('database', 'session', ), + 'delete_session': ('name', ), + 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', ), + 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), + 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), + 'get_session': ('name', ), + 'list_sessions': ('database', 'page_size', 'page_token', 'filter', ), + 'partition_query': ('session', 'sql', 'transaction', 'params', 'param_types', 'partition_options', ), + 'partition_read': ('session', 'table', 'key_set', 'transaction', 'index', 'columns', 'partition_options', ), + 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + 'rollback': ('session', 'transaction_id', ), + 'streaming_read': 
('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=spannerCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the spanner client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index 8495c20bef..f1a5adec45 100644 --- a/setup.py +++ b/setup.py @@ -22,16 +22,18 @@ name = "google-cloud-spanner" description = "Cloud Spanner API client library" -version = "1.19.1" +version = "2.0.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc, grpcgcp] >= 1.14.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.22.0, < 2.0.0dev", "google-cloud-core >= 1.4.1, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", + "proto-plus == 1.10.0-dev2", + "libcst >= 0.2.5", ] extras = { "tracing": [ @@ -53,7 +55,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. 
@@ -76,12 +80,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -90,7 +92,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", include_package_data=True, zip_safe=False, ) diff --git a/google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py b/stale_outputs_checked similarity index 100% rename from google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py rename to stale_outputs_checked diff --git a/synth.metadata b/synth.metadata index 14a9cac219..bba4518649 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,30 +3,22 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-spanner.git", - "sha": "891077105d5093a73caf96683d10afef2cd17823" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6fd07563a2f1a6785066f5955ad9659a315e4492", - "internalRef": "324941614" + "remote": "git@github.com:larkee/python-spanner.git", + "sha": "1d3e65af688c31937b0110223679607c19c328e9" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4f8f5dc24af79694887385015294e4dbb214c352" + "sha": "a783321fd55f010709294455584a553f4b24b944" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4f8f5dc24af79694887385015294e4dbb214c352" + "sha": "a783321fd55f010709294455584a553f4b24b944" } } ], diff --git a/synth.py b/synth.py index bf0c2f1b63..d13ddb67a5 100644 --- a/synth.py +++ b/synth.py @@ -30,48 +30,7 @@ include_protos=True, ) -s.move(library / "google/cloud/spanner_v1/proto") -s.move(library / "google/cloud/spanner_v1/gapic") -s.move(library / "tests") - -# Add grpcio-gcp options -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "import google.api_core.grpc_helpers\n", - "import pkg_resources\n" - "import grpc_gcp\n" - "\n" - "import google.api_core.grpc_helpers\n", -) -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "from google.cloud.spanner_v1.proto import spanner_pb2_grpc\n", - "\g<0>\n\n_GRPC_KEEPALIVE_MS = 2 * 60 * 1000\n" - "_SPANNER_GRPC_CONFIG = 'spanner.grpc.config'\n", -) - -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "(\s+)'grpc.max_receive_message_length': -1,", - "\g<0>\g<1>\"grpc.keepalive_time_ms\": _GRPC_KEEPALIVE_MS,", -) - -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "(\s+)return google.api_core.grpc_helpers.create_channel\(\n", - "\g<1>grpc_gcp_config = grpc_gcp.api_config_from_text_pb(" - "\g<1> pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG))" - "\g<1>options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]" - "\g<1>if 'options' in kwargs:" - "\g<1> options.extend(kwargs['options'])" - "\g<1>kwargs['options'] = options" - "\g<0>", -) -s.replace( - "tests/unit/gapic/v1/test_spanner_client_v1.py", - "from google.cloud import spanner_v1", - "from 
google.cloud.spanner_v1.gapic import spanner_client as spanner_v1", -) +s.move(library, excludes=["google/cloud/spanner/**", "*.*", "docs/index.rst", "google/cloud/spanner_v1/__init__.py"]) # ---------------------------------------------------------------------------- # Generate instance admin client @@ -83,28 +42,7 @@ include_protos=True, ) -s.move(library / "google/cloud/spanner_admin_instance_v1/gapic") -s.move(library / "google/cloud/spanner_admin_instance_v1/proto") -s.move(library / "tests") - -# Fix up the _GAPIC_LIBRARY_VERSION targets -s.replace( - "google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py", - "'google-cloud-spanner-admin-instance'", - "'google-cloud-spanner'", -) - -# Fix up generated imports -s.replace( - "google/**/*.py", - "from google\.cloud\.spanner\.admin\.instance_v1.proto", - "from google.cloud.spanner_admin_instance_v1.proto", -) - -# Fix docstrings -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""====*""", r"") -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""----*""", r"") -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""~~~~*""", r"") +s.move(library, excludes=["google/cloud/spanner_admin_instance/**", "*.*", "docs/index.rst"]) # ---------------------------------------------------------------------------- # Generate database admin client @@ -116,54 +54,23 @@ include_protos=True, ) -s.move(library / "google/cloud/spanner_admin_database_v1/gapic") -s.move(library / "google/cloud/spanner_admin_database_v1/proto") -s.move(library / "tests") - -# Fix up the _GAPIC_LIBRARY_VERSION targets -s.replace( - "google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py", - "'google-cloud-spanner-admin-database'", - "'google-cloud-spanner'", -) +s.move(library, excludes=["google/cloud/spanner_admin_database/**", "*.*", "docs/index.rst"]) -# Fix up the _GAPIC_LIBRARY_VERSION targets +# Fix formatting for bullet lists. +# See: https://github.com/googleapis/gapic-generator-python/issues/604 s.replace( - "google/**/*.py", - "from google\.cloud\.spanner\.admin\.database_v1.proto", - "from google.cloud.spanner_admin_database_v1.proto", + "google/cloud/spanner_admin_database_v1/services/database_admin/*.py", + "``backup.expire_time``.", + "``backup.expire_time``.\n" ) -# Fix up proto docs that are missing summary line. -s.replace( - "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py", - '"""Attributes:', - '"""Protocol buffer.\n\n Attributes:', -) - -# Fix LRO return types -s.replace("google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py", - "cloud.spanner_admin_instance_v1.types._OperationFuture", - "api_core.operation.Operation") -s.replace("google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py", - "cloud.spanner_admin_database_v1.types._OperationFuture", - "api_core.operation.Operation") - # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=99, samples=True) -s.move(templated_files, excludes=["noxfile.py"]) - -# Template's MANIFEST.in does not include the needed GAPIC config file. -# See PR #6928. 
-s.replace( - "MANIFEST.in", - "include README.rst LICENSE\n", - "include README.rst LICENSE\n" - "include google/cloud/spanner_v1/gapic/transports/spanner.grpc.config\n", -) +templated_files = common.py_library(microgenerator=True, samples=True) +s.move(templated_files, excludes=[".coveragerc", "noxfile.py"]) +# Ensure CI runs on a new instance each time s.replace( ".kokoro/build.sh", "# Remove old nox", diff --git a/tests/_helpers.py b/tests/_helpers.py index 6ebc4bb374..036c777845 100644 --- a/tests/_helpers.py +++ b/tests/_helpers.py @@ -47,4 +47,4 @@ def assertSpanAttributes( self.assertEqual(span.name, name) self.assertEqual(span.status.canonical_code, status) - self.assertEqual(span.attributes, attributes) + self.assertEqual(dict(span.attributes), attributes) diff --git a/tests/system/test_system.py b/tests/system/test_system.py index 65cc0ef1f9..1ba9b59163 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -31,23 +31,15 @@ from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.cloud.spanner_v1 import param_types -from google.cloud.spanner_v1.proto.type_pb2 import ARRAY -from google.cloud.spanner_v1.proto.type_pb2 import BOOL -from google.cloud.spanner_v1.proto.type_pb2 import BYTES -from google.cloud.spanner_v1.proto.type_pb2 import DATE -from google.cloud.spanner_v1.proto.type_pb2 import FLOAT64 -from google.cloud.spanner_v1.proto.type_pb2 import INT64 -from google.cloud.spanner_v1.proto.type_pb2 import STRING -from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP -from google.cloud.spanner_v1.proto.type_pb2 import NUMERIC -from google.cloud.spanner_v1.proto.type_pb2 import Type +from google.cloud.spanner_v1 import TypeCode +from google.cloud.spanner_v1 import Type from google.cloud._helpers import UTC -from google.cloud.spanner import Client -from google.cloud.spanner import KeyRange -from google.cloud.spanner import KeySet -from google.cloud.spanner import BurstyPool -from google.cloud.spanner import COMMIT_TIMESTAMP +from google.cloud.spanner_v1 import Client +from google.cloud.spanner_v1 import KeyRange +from google.cloud.spanner_v1 import KeySet +from google.cloud.spanner_v1 import BurstyPool +from google.cloud.spanner_v1 import COMMIT_TIMESTAMP from test_utils.retry import RetryErrors from test_utils.retry import RetryInstanceState @@ -74,8 +66,8 @@ BASE_ATTRIBUTES = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", - "net.host.name": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", + "net.host.name": "spanner.googleapis.com", } _STATUS_CODE_TO_GRPC_STATUS_CODE = { @@ -325,10 +317,8 @@ def test_create_database(self): # We want to make sure the operation completes. operation.result(30) # raises on failure / timeout. - database_ids = [ - database.database_id for database in Config.INSTANCE.list_databases() - ] - self.assertIn(temp_db_id, database_ids) + database_ids = [database.name for database in Config.INSTANCE.list_databases()] + self.assertIn(temp_db.name, database_ids) def test_table_not_found(self): temp_db_id = "temp_db" + unique_resource_id("_") @@ -730,11 +720,10 @@ def test_list_backups(self): self.assertEqual(backup.name, backup2.name) # List backups using pagination. 
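+        # In v2 the value returned by ``list_backups`` is iterated directly;
+        # the pager yields Backup messages and issues additional ListBackups
+        # requests at page boundaries as needed.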
- for page in instance.list_backups(page_size=1).pages: - count = 0 - for backup in page: - count += 1 - self.assertEqual(count, 1) + count = 0 + for page in instance.list_backups(page_size=1): + count += 1 + self.assertEqual(count, 2) SOME_DATE = datetime.date(2011, 1, 17) @@ -1285,12 +1274,12 @@ def test_transaction_batch_update_success(self): update_statement = ( "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;", {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": Type(code=INT64), "email": Type(code=STRING)}, + {"contact_id": param_types.INT64, "email": param_types.STRING}, ) delete_statement = ( "DELETE contacts WHERE contact_id = @contact_id;", {"contact_id": 1}, - {"contact_id": Type(code=INT64)}, + {"contact_id": param_types.INT64}, ) def unit_of_work(transaction, self): @@ -1328,7 +1317,7 @@ def test_transaction_batch_update_and_execute_dml(self): ( "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;", {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": Type(code=INT64), "email": Type(code=STRING)}, + {"contact_id": param_types.INT64, "email": param_types.STRING}, ) ] @@ -1370,12 +1359,12 @@ def test_transaction_batch_update_w_syntax_error(self): update_statement = ( "UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;", {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": Type(code=INT64), "email": Type(code=STRING)}, + {"contact_id": param_types.INT64, "email": param_types.STRING}, ) delete_statement = ( "DELETE contacts WHERE contact_id = @contact_id;", {"contact_id": 1}, - {"contact_id": Type(code=INT64)}, + {"contact_id": param_types.INT64}, ) def unit_of_work(transaction): @@ -1427,12 +1416,12 @@ def test_transaction_batch_update_w_parent_span(self): update_statement = ( "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;", {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": Type(code=INT64), "email": Type(code=STRING)}, + {"contact_id": param_types.INT64, "email": param_types.STRING}, ) delete_statement = ( "DELETE contacts WHERE contact_id = @contact_id;", {"contact_id": 1}, - {"contact_id": Type(code=INT64)}, + {"contact_id": param_types.INT64}, ) def unit_of_work(transaction, self): @@ -1493,7 +1482,7 @@ def _setup_table(txn): row_count = self._db.execute_partitioned_dml( update_statement, params={"email": nonesuch, "target": target}, - param_types={"email": Type(code=STRING), "target": Type(code=STRING)}, + param_types={"email": param_types.STRING, "target": param_types.STRING}, ) self.assertEqual(row_count, 1) @@ -1567,7 +1556,7 @@ def _query_w_concurrent_update(self, transaction, pkey): SQL = "SELECT * FROM counters WHERE name = @name" rows = list( transaction.execute_sql( - SQL, params={"name": pkey}, param_types={"name": Type(code=STRING)} + SQL, params={"name": pkey}, param_types={"name": param_types.STRING} ) ) self.assertEqual(len(rows), 1) @@ -2243,7 +2232,7 @@ def _bind_test_helper( ) # Bind an array of - array_type = Type(code=ARRAY, array_element_type=Type(code=type_name)) + array_type = Type(code=TypeCode.ARRAY, array_element_type=Type(code=type_name)) if expected_array_value is None: expected_array_value = array_value @@ -2278,16 +2267,16 @@ def _bind_test_helper( ) def test_execute_sql_w_string_bindings(self): - self._bind_test_helper(STRING, "Phred", ["Phred", "Bharney"]) + self._bind_test_helper(TypeCode.STRING, "Phred", ["Phred", "Bharney"]) def test_execute_sql_w_bool_bindings(self): - self._bind_test_helper(BOOL, 
True, [True, False, True]) + self._bind_test_helper(TypeCode.BOOL, True, [True, False, True]) def test_execute_sql_w_int64_bindings(self): - self._bind_test_helper(INT64, 42, [123, 456, 789]) + self._bind_test_helper(TypeCode.INT64, 42, [123, 456, 789]) def test_execute_sql_w_float64_bindings(self): - self._bind_test_helper(FLOAT64, 42.3, [12.3, 456.0, 7.89]) + self._bind_test_helper(TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89]) def test_execute_sql_w_float_bindings_transfinite(self): @@ -2296,7 +2285,7 @@ def test_execute_sql_w_float_bindings_transfinite(self): self._db, sql="SELECT @neg_inf", params={"neg_inf": NEG_INF}, - param_types={"neg_inf": Type(code=FLOAT64)}, + param_types={"neg_inf": param_types.FLOAT64}, expected=[(NEG_INF,)], order=False, ) @@ -2306,13 +2295,13 @@ def test_execute_sql_w_float_bindings_transfinite(self): self._db, sql="SELECT @pos_inf", params={"pos_inf": POS_INF}, - param_types={"pos_inf": Type(code=FLOAT64)}, + param_types={"pos_inf": param_types.FLOAT64}, expected=[(POS_INF,)], order=False, ) def test_execute_sql_w_bytes_bindings(self): - self._bind_test_helper(BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"]) + self._bind_test_helper(TypeCode.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"]) def test_execute_sql_w_timestamp_bindings(self): import pytz @@ -2334,17 +2323,19 @@ def test_execute_sql_w_timestamp_bindings(self): ] self._recurse_into_lists = False - self._bind_test_helper(TIMESTAMP, timestamp_1, timestamps, expected_timestamps) + self._bind_test_helper( + TypeCode.TIMESTAMP, timestamp_1, timestamps, expected_timestamps + ) def test_execute_sql_w_date_bindings(self): import datetime dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)] - self._bind_test_helper(DATE, SOME_DATE, dates) + self._bind_test_helper(TypeCode.DATE, SOME_DATE, dates) @unittest.skipIf(USE_EMULATOR, "Skipping NUMERIC") def test_execute_sql_w_numeric_bindings(self): - self._bind_test_helper(NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2]) + self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2]) def test_execute_sql_w_query_param_struct(self): NAME = "Phred" diff --git a/tests/unit/gapic/spanner_admin_database_v1/__init__.py b/tests/unit/gapic/spanner_admin_database_v1/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/unit/gapic/spanner_admin_database_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py new file mode 100644 index 0000000000..ea79f63e86 --- /dev/null +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -0,0 +1,5050 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.spanner_admin_database_v1.services.database_admin import ( + DatabaseAdminAsyncClient, +) +from google.cloud.spanner_admin_database_v1.services.database_admin import ( + DatabaseAdminClient, +) +from google.cloud.spanner_admin_database_v1.services.database_admin import pagers +from google.cloud.spanner_admin_database_v1.services.database_admin import transports +from google.cloud.spanner_admin_database_v1.types import backup +from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup +from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 as operations # type: ignore +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
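+# For example (values taken from the function body below): a client whose
+# DEFAULT_ENDPOINT contains "localhost" is reported as "foo.googleapis.com",
+# so the mTLS autoswitch branches under test see a non-local host; any other
+# client keeps its own DEFAULT_ENDPOINT.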
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DatabaseAdminClient._get_default_mtls_endpoint(None) is None + assert ( + DatabaseAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + DatabaseAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + DatabaseAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + DatabaseAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + DatabaseAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [DatabaseAdminClient, DatabaseAdminAsyncClient] +) +def test_database_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "spanner.googleapis.com:443" + + +def test_database_admin_client_get_transport_class(): + transport = DatabaseAdminClient.get_transport_class() + assert transport == transports.DatabaseAdminGrpcTransport + + transport = DatabaseAdminClient.get_transport_class("grpc") + assert transport == transports.DatabaseAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc"), + ( + DatabaseAdminAsyncClient, + transports.DatabaseAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + DatabaseAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DatabaseAdminClient), +) +@mock.patch.object( + DatabaseAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DatabaseAdminAsyncClient), +) +def test_database_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DatabaseAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DatabaseAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
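+    # An explicitly configured endpoint takes precedence over both the default
+    # and the mTLS default: the transport must be constructed with exactly
+    # host="squid.clam.whelk", as the patched __init__ assertion below verifies.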
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc", "true"), + ( + DatabaseAdminAsyncClient, + transports.DatabaseAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc", "false"), + ( + DatabaseAdminAsyncClient, + transports.DatabaseAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + DatabaseAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DatabaseAdminClient), +) +@mock.patch.object( + DatabaseAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DatabaseAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_database_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
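+    # The three blocks in this test exercise, in turn: an explicit
+    # client_cert_source, an ADC-discovered certificate, and no certificate at
+    # all; in every case GOOGLE_API_USE_CLIENT_CERTIFICATE="false" forces the
+    # plain endpoint with no SSL channel credentials.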
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc"), + ( + DatabaseAdminAsyncClient, + transports.DatabaseAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_database_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc"), + ( + DatabaseAdminAsyncClient, + transports.DatabaseAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_database_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_database_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.spanner_admin_database_v1.services.database_admin.transports.DatabaseAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = DatabaseAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_databases( + transport: str = "grpc", request_type=spanner_database_admin.ListDatabasesRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
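+    # The shape below recurs for every RPC in this module (a sketch, not
+    # shared fixture code):
+    #     with mock.patch.object(type(client.transport.<rpc>), "__call__") as call:
+    #         call.return_value = <response proto>
+    #         response = client.<rpc>(request)
+    # after which call.mock_calls[0] holds the request that reached the transport.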
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.ListDatabasesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabasesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListDatabasesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_databases_from_dict(): + test_list_databases(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_databases_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.ListDatabasesRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.ListDatabasesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabasesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatabasesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_databases_async_from_dict(): + await test_list_databases_async(request_type=dict) + + +def test_list_databases_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.ListDatabasesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + call.return_value = spanner_database_admin.ListDatabasesResponse() + + client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_databases_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
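+    # GAPIC propagates URI-bound fields as a gRPC routing header: with
+    # request.parent = "parent/value", the metadata sent to the stub must
+    # include the pair ("x-goog-request-params", "parent=parent/value"),
+    # which is exactly what the final assertion checks.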
+ request = spanner_database_admin.ListDatabasesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.ListDatabasesResponse() + ) + + await client.list_databases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_databases_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.ListDatabasesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_databases(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_databases_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_databases( + spanner_database_admin.ListDatabasesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_databases_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.ListDatabasesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.ListDatabasesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_databases(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_databases_flattened_error_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_databases( + spanner_database_admin.ListDatabasesRequest(), parent="parent_value", + ) + + +def test_list_databases_pager(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Set the response to a series of pages. 
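+        # Each element of side_effect answers one successive stub call, so the
+        # pager transparently walks 3 + 0 + 1 + 2 = 6 Database objects across
+        # four pages; the trailing RuntimeError would only surface if the pager
+        # issued an unexpected fifth call.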
+ call.side_effect = ( + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + next_page_token="abc", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[], next_page_token="def", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[spanner_database_admin.Database(),], next_page_token="ghi", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_databases(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, spanner_database_admin.Database) for i in results) + + +def test_list_databases_pages(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + next_page_token="abc", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[], next_page_token="def", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[spanner_database_admin.Database(),], next_page_token="ghi", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + ), + RuntimeError, + ) + pages = list(client.list_databases(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_databases_async_pager(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_databases), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
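+        # Same page sequence as the sync pager above, but served through an
+        # AsyncMock: `await client.list_databases(...)` returns the async pager
+        # itself, and the items are drained with `async for` rather than a
+        # plain comprehension.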
+ call.side_effect = ( + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + next_page_token="abc", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[], next_page_token="def", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[spanner_database_admin.Database(),], next_page_token="ghi", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_databases(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, spanner_database_admin.Database) for i in responses) + + +@pytest.mark.asyncio +async def test_list_databases_async_pages(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_databases), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + next_page_token="abc", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[], next_page_token="def", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[spanner_database_admin.Database(),], next_page_token="ghi", + ), + spanner_database_admin.ListDatabasesResponse( + databases=[ + spanner_database_admin.Database(), + spanner_database_admin.Database(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_databases(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_create_database( + transport: str = "grpc", request_type=spanner_database_admin.CreateDatabaseRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.CreateDatabaseRequest() + + # Establish that the response is the type that we expect. 
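+    # CreateDatabase is a long-running operation: the stub returns a raw
+    # operations_pb2.Operation, which the client wraps in an api_core future.
+    # On a real (unmocked) call, response.result() would eventually yield the
+    # finished Database.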
+ assert isinstance(response, future.Future) + + +def test_create_database_from_dict(): + test_create_database(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_database_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.CreateDatabaseRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.CreateDatabaseRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_database_async_from_dict(): + await test_create_database_async(request_type=dict) + + +def test_create_database_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.CreateDatabaseRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_database_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.CreateDatabaseRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_database_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
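+    # Flattened arguments spare the caller from building a request proto by
+    # hand; the keyword values below are copied onto a CreateDatabaseRequest
+    # internally. Supplying both a request object and flattened fields is
+    # rejected with ValueError (see the *_flattened_error tests).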
+ with mock.patch.object(type(client.transport.create_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_database( + parent="parent_value", create_statement="create_statement_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].create_statement == "create_statement_value" + + +def test_create_database_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_database( + spanner_database_admin.CreateDatabaseRequest(), + parent="parent_value", + create_statement="create_statement_value", + ) + + +@pytest.mark.asyncio +async def test_create_database_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_database( + parent="parent_value", create_statement="create_statement_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].create_statement == "create_statement_value" + + +@pytest.mark.asyncio +async def test_create_database_flattened_error_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_database( + spanner_database_admin.CreateDatabaseRequest(), + parent="parent_value", + create_statement="create_statement_value", + ) + + +def test_get_database( + transport: str = "grpc", request_type=spanner_database_admin.GetDatabaseRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.Database( + name="name_value", state=spanner_database_admin.Database.State.CREATING, + ) + + response = client.get_database(request) + + # Establish that the underlying gRPC stub method was called. 
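+        # A mock.call record unpacks as (name, args, kwargs), so args[0] below
+        # is the request proto the transport actually received, compared
+        # against a freshly constructed (empty) GetDatabaseRequest.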
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, spanner_database_admin.Database) + + assert response.name == "name_value" + + assert response.state == spanner_database_admin.Database.State.CREATING + + +def test_get_database_from_dict(): + test_get_database(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_database_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.GetDatabaseRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.Database( + name="name_value", state=spanner_database_admin.Database.State.CREATING, + ) + ) + + response = await client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner_database_admin.Database) + + assert response.name == "name_value" + + assert response.state == spanner_database_admin.Database.State.CREATING + + +@pytest.mark.asyncio +async def test_get_database_async_from_dict(): + await test_get_database_async(request_type=dict) + + +def test_get_database_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.GetDatabaseRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value = spanner_database_admin.Database() + + client.get_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_database_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.GetDatabaseRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.Database() + ) + + await client.get_database(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_database_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.Database() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_database(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_database_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_database( + spanner_database_admin.GetDatabaseRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_database_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.Database() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.Database() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_database(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_database_flattened_error_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_database( + spanner_database_admin.GetDatabaseRequest(), name="name_value", + ) + + +def test_update_database_ddl( + transport: str = "grpc", + request_type=spanner_database_admin.UpdateDatabaseDdlRequest, +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.UpdateDatabaseDdlRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_database_ddl_from_dict(): + test_update_database_ddl(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_database_ddl_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.UpdateDatabaseDdlRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.UpdateDatabaseDdlRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_database_ddl_async_from_dict(): + await test_update_database_ddl_async(request_type=dict) + + +def test_update_database_ddl_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.UpdateDatabaseDdlRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_database_ddl_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.UpdateDatabaseDdlRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +def test_update_database_ddl_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_database_ddl( + database="database_value", statements=["statements_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + assert args[0].statements == ["statements_value"] + + +def test_update_database_ddl_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_database_ddl( + spanner_database_admin.UpdateDatabaseDdlRequest(), + database="database_value", + statements=["statements_value"], + ) + + +@pytest.mark.asyncio +async def test_update_database_ddl_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_database_ddl( + database="database_value", statements=["statements_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + assert args[0].statements == ["statements_value"] + + +@pytest.mark.asyncio +async def test_update_database_ddl_flattened_error_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_database_ddl( + spanner_database_admin.UpdateDatabaseDdlRequest(), + database="database_value", + statements=["statements_value"], + ) + + +def test_drop_database( + transport: str = "grpc", request_type=spanner_database_admin.DropDatabaseRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
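+    # DropDatabase returns google.protobuf.Empty on the wire, which the client
+    # surfaces as None, hence return_value = None here and the
+    # `response is None` assertion below.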
+ with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.drop_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.DropDatabaseRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_drop_database_from_dict(): + test_drop_database(request_type=dict) + + +@pytest.mark.asyncio +async def test_drop_database_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.DropDatabaseRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.drop_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.DropDatabaseRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_drop_database_async_from_dict(): + await test_drop_database_async(request_type=dict) + + +def test_drop_database_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.DropDatabaseRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + call.return_value = None + + client.drop_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_drop_database_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.DropDatabaseRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.drop_database(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +def test_drop_database_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.drop_database(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +def test_drop_database_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.drop_database( + spanner_database_admin.DropDatabaseRequest(), database="database_value", + ) + + +@pytest.mark.asyncio +async def test_drop_database_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.drop_database(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +@pytest.mark.asyncio +async def test_drop_database_flattened_error_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.drop_database( + spanner_database_admin.DropDatabaseRequest(), database="database_value", + ) + + +def test_get_database_ddl( + transport: str = "grpc", request_type=spanner_database_admin.GetDatabaseDdlRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_database_admin.GetDatabaseDdlResponse( + statements=["statements_value"], + ) + + response = client.get_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseDdlRequest() + + # Establish that the response is the type that we expect. 
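+    # GetDatabaseDdlResponse.statements is a repeated string field; proto-plus
+    # repeated fields compare equal to plain Python lists, so the assertion can
+    # use a list literal directly.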
+ + assert isinstance(response, spanner_database_admin.GetDatabaseDdlResponse) + + assert response.statements == ["statements_value"] + + +def test_get_database_ddl_from_dict(): + test_get_database_ddl(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_database_ddl_async( + transport: str = "grpc_asyncio", + request_type=spanner_database_admin.GetDatabaseDdlRequest, +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.GetDatabaseDdlResponse( + statements=["statements_value"], + ) + ) + + response = await client.get_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseDdlRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner_database_admin.GetDatabaseDdlResponse) + + assert response.statements == ["statements_value"] + + +@pytest.mark.asyncio +async def test_get_database_ddl_async_from_dict(): + await test_get_database_ddl_async(request_type=dict) + + +def test_get_database_ddl_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.GetDatabaseDdlRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + call.return_value = spanner_database_admin.GetDatabaseDdlResponse() + + client.get_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_database_ddl_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_database_admin.GetDatabaseDdlRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.GetDatabaseDdlResponse() + ) + + await client.get_database_ddl(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "database=database/value",) in kw["metadata"]
+
+
+def test_get_database_ddl_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = spanner_database_admin.GetDatabaseDdlResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_database_ddl(database="database_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].database == "database_value"
+
+
+def test_get_database_ddl_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_database_ddl(
+            spanner_database_admin.GetDatabaseDdlRequest(), database="database_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_database_ddl_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner_database_admin.GetDatabaseDdlResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_database_ddl(database="database_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].database == "database_value"
+
+
+@pytest.mark.asyncio
+async def test_get_database_ddl_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_database_ddl(
+            spanner_database_admin.GetDatabaseDdlRequest(), database="database_value",
+        )
+
+
+def test_set_iam_policy(
+    transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy(version=774, etag=b"etag_blob",)
+
+        response = client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
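+        # (Each mock_calls entry unpacks as (name, args, kwargs); args[0] is the request proto the stub received.)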
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict_foreign():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy()
+
+        response = client.set_iam_policy(
+            request={
+                "resource": "resource_value",
+                "policy": policy.Policy(version=774),
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+def test_set_iam_policy_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_iam_policy(
+            iam_policy.SetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_iam_policy(
+            iam_policy.SetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+def test_get_iam_policy(
+    transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
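+    # (Patching type(...).__call__ intercepts the transport's bound gRPC method itself.)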
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_from_dict(): + test_get_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+
+        await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict_foreign():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy()
+
+        response = client.get_iam_policy(
+            request={
+                "resource": "resource_value",
+                "options": options.GetPolicyOptions(requested_policy_version=2598),
+            }
+        )
+        call.assert_called()
+
+
+def test_get_iam_policy_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+def test_get_iam_policy_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            iam_policy.GetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +def test_test_iam_permissions( + transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_from_dict(): + test_test_iam_permissions(request_type=dict) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +def test_test_iam_permissions_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
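+        # (FakeUnaryUnaryCall wraps the response in an awaitable, standing in for a real async gRPC call.)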
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy.TestIamPermissionsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.test_iam_permissions(
+            resource="resource_value", permissions=["permissions_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+        assert args[0].permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.test_iam_permissions(
+            iam_policy.TestIamPermissionsRequest(),
+            resource="resource_value",
+            permissions=["permissions_value"],
+        )
+
+
+def test_create_backup(
+    transport: str = "grpc", request_type=gsad_backup.CreateBackupRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+
+        response = client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == gsad_backup.CreateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_create_backup_from_dict():
+    test_create_backup(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_create_backup_async(
+    transport: str = "grpc_asyncio", request_type=gsad_backup.CreateBackupRequest
+):
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+
+        response = await client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == gsad_backup.CreateBackupRequest()
+
+    # Establish that the response is the type that we expect.
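+    # (CreateBackup is a long-running operation, so the client returns an operation future rather than the Backup itself.)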
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_backup_async_from_dict(): + await test_create_backup_async(request_type=dict) + + +def test_create_backup_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gsad_backup.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_backup_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gsad_backup.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_backup_flattened(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_backup( + parent="parent_value", + backup=gsad_backup.Backup(database="database_value"), + backup_id="backup_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].backup == gsad_backup.Backup(database="database_value") + + assert args[0].backup_id == "backup_id_value" + + +def test_create_backup_flattened_error(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_backup(
+            gsad_backup.CreateBackupRequest(),
+            parent="parent_value",
+            backup=gsad_backup.Backup(database="database_value"),
+            backup_id="backup_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_backup(
+            parent="parent_value",
+            backup=gsad_backup.Backup(database="database_value"),
+            backup_id="backup_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].backup == gsad_backup.Backup(database="database_value")
+
+        assert args[0].backup_id == "backup_id_value"
+
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_backup(
+            gsad_backup.CreateBackupRequest(),
+            parent="parent_value",
+            backup=gsad_backup.Backup(database="database_value"),
+            backup_id="backup_id_value",
+        )
+
+
+def test_get_backup(transport: str = "grpc", request_type=backup.GetBackupRequest):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = backup.Backup(
+            database="database_value",
+            name="name_value",
+            size_bytes=1089,
+            state=backup.Backup.State.CREATING,
+            referencing_databases=["referencing_databases_value"],
+        )
+
+        response = client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == backup.GetBackupRequest()
+
+    # Establish that the response is the type that we expect.
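+    # (Every field set on the mocked Backup should round-trip to the response unchanged.)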
+ + assert isinstance(response, backup.Backup) + + assert response.database == "database_value" + + assert response.name == "name_value" + + assert response.size_bytes == 1089 + + assert response.state == backup.Backup.State.CREATING + + assert response.referencing_databases == ["referencing_databases_value"] + + +def test_get_backup_from_dict(): + test_get_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_backup_async( + transport: str = "grpc_asyncio", request_type=backup.GetBackupRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + backup.Backup( + database="database_value", + name="name_value", + size_bytes=1089, + state=backup.Backup.State.CREATING, + referencing_databases=["referencing_databases_value"], + ) + ) + + response = await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.GetBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, backup.Backup) + + assert response.database == "database_value" + + assert response.name == "name_value" + + assert response.size_bytes == 1089 + + assert response.state == backup.Backup.State.CREATING + + assert response.referencing_databases == ["referencing_databases_value"] + + +@pytest.mark.asyncio +async def test_get_backup_async_from_dict(): + await test_get_backup_async(request_type=dict) + + +def test_get_backup_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = backup.GetBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = backup.Backup() + + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_backup_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = backup.GetBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(backup.Backup()) + + await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. 
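+        # (The async variants only assert that at least one call was recorded.)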
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_backup_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = backup.Backup()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_backup_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_backup(
+            backup.GetBackupRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(backup.Backup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_backup(
+            backup.GetBackupRequest(), name="name_value",
+        )
+
+
+def test_update_backup(
+    transport: str = "grpc", request_type=gsad_backup.UpdateBackupRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gsad_backup.Backup(
+            database="database_value",
+            name="name_value",
+            size_bytes=1089,
+            state=gsad_backup.Backup.State.CREATING,
+            referencing_databases=["referencing_databases_value"],
+        )
+
+        response = client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gsad_backup.Backup) + + assert response.database == "database_value" + + assert response.name == "name_value" + + assert response.size_bytes == 1089 + + assert response.state == gsad_backup.Backup.State.CREATING + + assert response.referencing_databases == ["referencing_databases_value"] + + +def test_update_backup_from_dict(): + test_update_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_backup_async( + transport: str = "grpc_asyncio", request_type=gsad_backup.UpdateBackupRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gsad_backup.Backup( + database="database_value", + name="name_value", + size_bytes=1089, + state=gsad_backup.Backup.State.CREATING, + referencing_databases=["referencing_databases_value"], + ) + ) + + response = await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gsad_backup.Backup) + + assert response.database == "database_value" + + assert response.name == "name_value" + + assert response.size_bytes == 1089 + + assert response.state == gsad_backup.Backup.State.CREATING + + assert response.referencing_databases == ["referencing_databases_value"] + + +@pytest.mark.asyncio +async def test_update_backup_async_from_dict(): + await test_update_backup_async(request_type=dict) + + +def test_update_backup_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = gsad_backup.UpdateBackupRequest() + request.backup.name = "backup.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = gsad_backup.Backup() + + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_backup_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
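+    # (Nested fields such as backup.name appear in the routing header under their full path.)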
+    request = gsad_backup.UpdateBackupRequest()
+    request.backup.name = "backup.name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gsad_backup.Backup())
+
+        await client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"]
+
+
+def test_update_backup_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gsad_backup.Backup()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_backup(
+            backup=gsad_backup.Backup(database="database_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].backup == gsad_backup.Backup(database="database_value")
+
+        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+
+
+def test_update_backup_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_backup(
+            gsad_backup.UpdateBackupRequest(),
+            backup=gsad_backup.Backup(database="database_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_backup_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gsad_backup.Backup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_backup(
+            backup=gsad_backup.Backup(database="database_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].backup == gsad_backup.Backup(database="database_value")
+
+        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+
+
+@pytest.mark.asyncio
+async def test_update_backup_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.update_backup( + gsad_backup.UpdateBackupRequest(), + backup=gsad_backup.Backup(database="database_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_backup( + transport: str = "grpc", request_type=backup.DeleteBackupRequest +): + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_backup_from_dict(): + test_delete_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_backup_async( + transport: str = "grpc_asyncio", request_type=backup.DeleteBackupRequest +): + client = DatabaseAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_backup_async_from_dict(): + await test_delete_backup_async(request_type=dict) + + +def test_delete_backup_field_headers(): + client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = backup.DeleteBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_backup_field_headers_async(): + client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = backup.DeleteBackupRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        await client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_backup_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_backup_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_backup(
+            backup.DeleteBackupRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_backup(
+            backup.DeleteBackupRequest(), name="name_value",
+        )
+
+
+def test_list_backups(transport: str = "grpc", request_type=backup.ListBackupsRequest):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Designate an appropriate return value for the call.
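+        # (A non-empty next_page_token signals further pages; the client surfaces the result as a pager.)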
+        call.return_value = backup.ListBackupsResponse(
+            next_page_token="next_page_token_value",
+        )
+
+        response = client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == backup.ListBackupsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBackupsPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_backups_from_dict():
+    test_list_backups(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async(
+    transport: str = "grpc_asyncio", request_type=backup.ListBackupsRequest
+):
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupsResponse(next_page_token="next_page_token_value",)
+        )
+
+        response = await client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == backup.ListBackupsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBackupsAsyncPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_from_dict():
+    await test_list_backups_async(request_type=dict)
+
+
+def test_list_backups_field_headers():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = backup.ListBackupsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        call.return_value = backup.ListBackupsResponse()
+
+        client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_backups_field_headers_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = backup.ListBackupsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupsResponse()
+        )
+
+        await client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
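+        # (Bare truthiness check: the async wrapper may record more than
+        # one mock call, so we only assert the stub was invoked at all.)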
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_backups_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = backup.ListBackupsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_backups(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_backups_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_backups(
+            backup.ListBackupsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_backups_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_backups(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_backups_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_backups(
+            backup.ListBackupsRequest(), parent="parent_value",
+        )
+
+
+def test_list_backups_pager():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
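+        # The trailing RuntimeError makes the test fail loudly if the pager
+        # ever requests a page beyond the ones staged here.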
+        call.side_effect = (
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(), backup.Backup(), backup.Backup(),],
+                next_page_token="abc",
+            ),
+            backup.ListBackupsResponse(backups=[], next_page_token="def",),
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(),], next_page_token="ghi",
+            ),
+            backup.ListBackupsResponse(backups=[backup.Backup(), backup.Backup(),],),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_backups(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, backup.Backup) for i in results)
+
+
+def test_list_backups_pages():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(), backup.Backup(), backup.Backup(),],
+                next_page_token="abc",
+            ),
+            backup.ListBackupsResponse(backups=[], next_page_token="def",),
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(),], next_page_token="ghi",
+            ),
+            backup.ListBackupsResponse(backups=[backup.Backup(), backup.Backup(),],),
+            RuntimeError,
+        )
+        pages = list(client.list_backups(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pager():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(), backup.Backup(), backup.Backup(),],
+                next_page_token="abc",
+            ),
+            backup.ListBackupsResponse(backups=[], next_page_token="def",),
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(),], next_page_token="ghi",
+            ),
+            backup.ListBackupsResponse(backups=[backup.Backup(), backup.Backup(),],),
+            RuntimeError,
+        )
+        async_pager = await client.list_backups(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, backup.Backup) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pages():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(), backup.Backup(), backup.Backup(),],
+                next_page_token="abc",
+            ),
+            backup.ListBackupsResponse(backups=[], next_page_token="def",),
+            backup.ListBackupsResponse(
+                backups=[backup.Backup(),], next_page_token="ghi",
+            ),
+            backup.ListBackupsResponse(backups=[backup.Backup(), backup.Backup(),],),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_backups(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_restore_database(
+    transport: str = "grpc", request_type=spanner_database_admin.RestoreDatabaseRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+
+        response = client.restore_database(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner_database_admin.RestoreDatabaseRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_restore_database_from_dict():
+    test_restore_database(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_restore_database_async(
+    transport: str = "grpc_asyncio",
+    request_type=spanner_database_admin.RestoreDatabaseRequest,
+):
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+
+        response = await client.restore_database(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner_database_admin.RestoreDatabaseRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_restore_database_async_from_dict():
+    await test_restore_database_async(request_type=dict)
+
+
+def test_restore_database_field_headers():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_database_admin.RestoreDatabaseRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        client.restore_database(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_restore_database_field_headers_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_database_admin.RestoreDatabaseRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.restore_database(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_restore_database_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.restore_database(
+            parent="parent_value",
+            database_id="database_id_value",
+            backup="backup_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].database_id == "database_id_value"
+
+        assert args[0].backup == "backup_value"
+
+
+def test_restore_database_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.restore_database(
+            spanner_database_admin.RestoreDatabaseRequest(),
+            parent="parent_value",
+            database_id="database_id_value",
+            backup="backup_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_restore_database_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.restore_database), "__call__") as call:
+        # Designate an appropriate return value for the call.
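+        # The Operation is wrapped in a fake awaitable call so the async
+        # client can await the mocked RPC.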
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.restore_database(
+            parent="parent_value",
+            database_id="database_id_value",
+            backup="backup_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].database_id == "database_id_value"
+
+        assert args[0].backup == "backup_value"
+
+
+@pytest.mark.asyncio
+async def test_restore_database_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.restore_database(
+            spanner_database_admin.RestoreDatabaseRequest(),
+            parent="parent_value",
+            database_id="database_id_value",
+            backup="backup_value",
+        )
+
+
+def test_list_database_operations(
+    transport: str = "grpc",
+    request_type=spanner_database_admin.ListDatabaseOperationsRequest,
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = spanner_database_admin.ListDatabaseOperationsResponse(
+            next_page_token="next_page_token_value",
+        )
+
+        response = client.list_database_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner_database_admin.ListDatabaseOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDatabaseOperationsPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_database_operations_from_dict():
+    test_list_database_operations(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_async(
+    transport: str = "grpc_asyncio",
+    request_type=spanner_database_admin.ListDatabaseOperationsRequest,
+):
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                next_page_token="next_page_token_value",
+            )
+        )
+
+        response = await client.list_database_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner_database_admin.ListDatabaseOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDatabaseOperationsAsyncPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_async_from_dict():
+    await test_list_database_operations_async(request_type=dict)
+
+
+def test_list_database_operations_field_headers():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_database_admin.ListDatabaseOperationsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        call.return_value = spanner_database_admin.ListDatabaseOperationsResponse()
+
+        client.list_database_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_field_headers_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_database_admin.ListDatabaseOperationsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner_database_admin.ListDatabaseOperationsResponse()
+        )
+
+        await client.list_database_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_database_operations_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = spanner_database_admin.ListDatabaseOperationsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_database_operations(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_database_operations_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_database_operations(
+            spanner_database_admin.ListDatabaseOperationsRequest(),
+            parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner_database_admin.ListDatabaseOperationsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_database_operations(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_database_operations(
+            spanner_database_admin.ListDatabaseOperationsRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_database_operations_pager():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[], next_page_token="def",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_database_operations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, operations.Operation) for i in results)
+
+
+def test_list_database_operations_pages():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[], next_page_token="def",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_database_operations(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_async_pager():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[], next_page_token="def",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_database_operations(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, operations.Operation) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_database_operations_async_pages():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_database_operations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[], next_page_token="def",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            spanner_database_admin.ListDatabaseOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_database_operations(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_list_backup_operations(
+    transport: str = "grpc", request_type=backup.ListBackupOperationsRequest
+):
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = backup.ListBackupOperationsResponse(
+            next_page_token="next_page_token_value",
+        )
+
+        response = client.list_backup_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == backup.ListBackupOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBackupOperationsPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_backup_operations_from_dict():
+    test_list_backup_operations(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_async(
+    transport: str = "grpc_asyncio", request_type=backup.ListBackupOperationsRequest
+):
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupOperationsResponse(
+                next_page_token="next_page_token_value",
+            )
+        )
+
+        response = await client.list_backup_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == backup.ListBackupOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBackupOperationsAsyncPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_async_from_dict():
+    await test_list_backup_operations_async(request_type=dict)
+
+
+def test_list_backup_operations_field_headers():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = backup.ListBackupOperationsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        call.return_value = backup.ListBackupOperationsResponse()
+
+        client.list_backup_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_field_headers_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = backup.ListBackupOperationsRequest()
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupOperationsResponse()
+        )
+
+        await client.list_backup_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_backup_operations_flattened():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = backup.ListBackupOperationsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_backup_operations(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_backup_operations_flattened_error():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_backup_operations(
+            backup.ListBackupOperationsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_flattened_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            backup.ListBackupOperationsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_backup_operations(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_flattened_error_async():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_backup_operations(
+            backup.ListBackupOperationsRequest(), parent="parent_value",
+        )
+
+
+def test_list_backup_operations_pager():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            backup.ListBackupOperationsResponse(operations=[], next_page_token="def",),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_backup_operations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, operations.Operation) for i in results)
+
+
+def test_list_backup_operations_pages():
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            backup.ListBackupOperationsResponse(operations=[], next_page_token="def",),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_backup_operations(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_async_pager():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            backup.ListBackupOperationsResponse(operations=[], next_page_token="def",),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_backup_operations(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, operations.Operation) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_backup_operations_async_pages():
+    client = DatabaseAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backup_operations),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            backup.ListBackupOperationsResponse(
+                operations=[
+                    operations.Operation(),
+                    operations.Operation(),
+                    operations.Operation(),
+                ],
+                next_page_token="abc",
+            ),
+            backup.ListBackupOperationsResponse(operations=[], next_page_token="def",),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(),], next_page_token="ghi",
+            ),
+            backup.ListBackupOperationsResponse(
+                operations=[operations.Operation(), operations.Operation(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_backup_operations(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.DatabaseAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = DatabaseAdminClient(
+            credentials=credentials.AnonymousCredentials(), transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
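+    # (A transport instance already carries its own credentials, so passing
+    # per-client credential options alongside it would be ambiguous.)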
+    transport = transports.DatabaseAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = DatabaseAdminClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.DatabaseAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = DatabaseAdminClient(
+            client_options={"scopes": ["1", "2"]}, transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.DatabaseAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    client = DatabaseAdminClient(transport=transport)
+    assert client.transport is transport
+
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.DatabaseAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+    transport = transports.DatabaseAdminGrpcAsyncIOTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.DatabaseAdminGrpcTransport,
+        transports.DatabaseAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(auth, "default") as adc:
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+def test_transport_grpc_default():
+    # A client should use the gRPC transport by default.
+    client = DatabaseAdminClient(credentials=credentials.AnonymousCredentials(),)
+    assert isinstance(client.transport, transports.DatabaseAdminGrpcTransport,)
+
+
+def test_database_admin_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(exceptions.DuplicateCredentialArgs):
+        transport = transports.DatabaseAdminTransport(
+            credentials=credentials.AnonymousCredentials(),
+            credentials_file="credentials.json",
+        )
+
+
+def test_database_admin_base_transport():
+    # Instantiate the base transport.
+    with mock.patch(
+        "google.cloud.spanner_admin_database_v1.services.database_admin.transports.DatabaseAdminTransport.__init__"
+    ) as Transport:
+        Transport.return_value = None
+        transport = transports.DatabaseAdminTransport(
+            credentials=credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
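+    # (The base transport only defines the interface; the concrete gRPC
+    # transports are expected to override each of these methods.)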
+    methods = (
+        "list_databases",
+        "create_database",
+        "get_database",
+        "update_database_ddl",
+        "drop_database",
+        "get_database_ddl",
+        "set_iam_policy",
+        "get_iam_policy",
+        "test_iam_permissions",
+        "create_backup",
+        "get_backup",
+        "update_backup",
+        "delete_backup",
+        "list_backups",
+        "restore_database",
+        "list_database_operations",
+        "list_backup_operations",
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+def test_database_admin_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(
+        auth, "load_credentials_from_file"
+    ) as load_creds, mock.patch(
+        "google.cloud.spanner_admin_database_v1.services.database_admin.transports.DatabaseAdminTransport._prep_wrapped_messages"
+    ) as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (credentials.AnonymousCredentials(), None)
+        transport = transports.DatabaseAdminTransport(
+            credentials_file="credentials.json", quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=(
+                "https://www.googleapis.com/auth/cloud-platform",
+                "https://www.googleapis.com/auth/spanner.admin",
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_database_admin_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(auth, "default") as adc, mock.patch(
+        "google.cloud.spanner_admin_database_v1.services.database_admin.transports.DatabaseAdminTransport._prep_wrapped_messages"
+    ) as Transport:
+        Transport.return_value = None
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        transport = transports.DatabaseAdminTransport()
+        adc.assert_called_once()
+
+
+def test_database_admin_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(auth, "default") as adc:
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        DatabaseAdminClient()
+        adc.assert_called_once_with(
+            scopes=(
+                "https://www.googleapis.com/auth/cloud-platform",
+                "https://www.googleapis.com/auth/spanner.admin",
+            ),
+            quota_project_id=None,
+        )
+
+
+def test_database_admin_transport_auth_adc():
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
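+    # google.auth.default is patched below, so no real ADC lookup happens.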
+    with mock.patch.object(auth, "default") as adc:
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        transports.DatabaseAdminGrpcTransport(
+            host="squid.clam.whelk", quota_project_id="octopus"
+        )
+        adc.assert_called_once_with(
+            scopes=(
+                "https://www.googleapis.com/auth/cloud-platform",
+                "https://www.googleapis.com/auth/spanner.admin",
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_database_admin_host_no_port():
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="spanner.googleapis.com"
+        ),
+    )
+    assert client.transport._host == "spanner.googleapis.com:443"
+
+
+def test_database_admin_host_with_port():
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="spanner.googleapis.com:8000"
+        ),
+    )
+    assert client.transport._host == "spanner.googleapis.com:8000"
+
+
+def test_database_admin_grpc_transport_channel():
+    channel = grpc.insecure_channel("http://localhost/")
+
+    # Check that channel is used if provided.
+    transport = transports.DatabaseAdminGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+
+
+def test_database_admin_grpc_asyncio_transport_channel():
+    channel = aio.insecure_channel("http://localhost/")
+
+    # Check that channel is used if provided.
+    transport = transports.DatabaseAdminGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.DatabaseAdminGrpcTransport,
+        transports.DatabaseAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_database_admin_transport_channel_mtls_with_client_cert_source(transport_class):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/spanner.admin",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.DatabaseAdminGrpcTransport,
+        transports.DatabaseAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_database_admin_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/spanner.admin",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_database_admin_grpc_lro_client():
+    client = DatabaseAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_database_admin_grpc_lro_async_client():
+    client = DatabaseAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_backup_path():
+    project = "squid"
+    instance = "clam"
+    backup = "whelk"
+
+    expected = "projects/{project}/instances/{instance}/backups/{backup}".format(
+        project=project, instance=instance, backup=backup,
+    )
+    actual = DatabaseAdminClient.backup_path(project, instance, backup)
+    assert expected == actual
+
+
+def test_parse_backup_path():
+    expected = {
+        "project": "octopus",
+        "instance": "oyster",
+        "backup": "nudibranch",
+    }
+    path = DatabaseAdminClient.backup_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_backup_path(path)
+    assert expected == actual
+
+
+def test_database_path():
+    project = "cuttlefish"
+    instance = "mussel"
+    database = "winkle"
+
+    expected = "projects/{project}/instances/{instance}/databases/{database}".format(
+        project=project, instance=instance, database=database,
+    )
+    actual = DatabaseAdminClient.database_path(project, instance, database)
+    assert expected == actual
+
+
+def test_parse_database_path():
+    expected = {
+        "project": "nautilus",
+        "instance": "scallop",
+        "database": "abalone",
+    }
+    path = DatabaseAdminClient.database_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_database_path(path)
+    assert expected == actual
+
+
+def test_instance_path():
+    project = "squid"
+    instance = "clam"
+
+    expected = "projects/{project}/instances/{instance}".format(
+        project=project, instance=instance,
+    )
+    actual = DatabaseAdminClient.instance_path(project, instance)
+    assert expected == actual
+
+
+def test_parse_instance_path():
+    expected = {
+        "project": "whelk",
+        "instance": "octopus",
+    }
+    path = DatabaseAdminClient.instance_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_instance_path(path)
+    assert expected == actual
+
+
+def test_common_billing_account_path():
+    billing_account = "oyster"
+
+    expected = "billingAccounts/{billing_account}".format(
+        billing_account=billing_account,
+    )
+    actual = DatabaseAdminClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "nudibranch",
+    }
+    path = DatabaseAdminClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+
+def test_common_folder_path():
+    folder = "cuttlefish"
+
+    expected = "folders/{folder}".format(folder=folder,)
+    actual = DatabaseAdminClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "mussel",
+    }
+    path = DatabaseAdminClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_common_folder_path(path)
+    assert expected == actual
+
+
+def test_common_organization_path():
+    organization = "winkle"
+
+    expected = "organizations/{organization}".format(organization=organization,)
+    actual = DatabaseAdminClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nautilus",
+    }
+    path = DatabaseAdminClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_common_organization_path(path)
+    assert expected == actual
+
+
+def test_common_project_path():
+    project = "scallop"
+
+    expected = "projects/{project}".format(project=project,)
+    actual = DatabaseAdminClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "abalone",
+    }
+    path = DatabaseAdminClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatabaseAdminClient.parse_common_project_path(path)
+    assert expected == actual
+
+
+def test_common_location_path():
+    project = "squid"
+    location = "clam"
+
+    expected = "projects/{project}/locations/{location}".format(
+        project=project, location=location,
+    )
+    actual = DatabaseAdminClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "whelk",
+        "location": "octopus",
+    }
+    path = DatabaseAdminClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = DatabaseAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.DatabaseAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.DatabaseAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = DatabaseAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/spanner_admin_instance_v1/__init__.py b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py new file mode 100644 index 0000000000..0db8185b79 --- /dev/null +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -0,0 +1,3380 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.spanner_admin_instance_v1.services.instance_admin import ( + InstanceAdminAsyncClient, +) +from google.cloud.spanner_admin_instance_v1.services.instance_admin import ( + InstanceAdminClient, +) +from google.cloud.spanner_admin_instance_v1.services.instance_admin import pagers +from google.cloud.spanner_admin_instance_v1.services.instance_admin import transports +from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If the default endpoint is localhost, then the default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes.
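+# (The mtls endpoint is derived by rewriting a *.googleapis.com hostname; a +# localhost default would come back unchanged, so the tests patch in a +# googleapis.com name that yields a distinct mtls endpoint.)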
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert InstanceAdminClient._get_default_mtls_endpoint(None) is None + assert ( + InstanceAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + InstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + InstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + InstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + InstanceAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [InstanceAdminClient, InstanceAdminAsyncClient] +) +def test_instance_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "spanner.googleapis.com:443" + + +def test_instance_admin_client_get_transport_class(): + transport = InstanceAdminClient.get_transport_class() + assert transport == transports.InstanceAdminGrpcTransport + + transport = InstanceAdminClient.get_transport_class("grpc") + assert transport == transports.InstanceAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc"), + ( + InstanceAdminAsyncClient, + transports.InstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + InstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(InstanceAdminClient), +) +@mock.patch.object( + InstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(InstanceAdminAsyncClient), +) +def test_instance_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(InstanceAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InstanceAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
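+ # (An explicit client_options.api_endpoint should be passed straight through + # to the transport as host, bypassing the mtls endpoint switching logic.)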
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc", "true"), + ( + InstanceAdminAsyncClient, + transports.InstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc", "false"), + ( + InstanceAdminAsyncClient, + transports.InstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + InstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(InstanceAdminClient), +) +@mock.patch.object( + InstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(InstanceAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_instance_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
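+ # (With no certificate available from either source, mtls cannot be used: + # the asserts below expect the plain default endpoint and + # ssl_channel_credentials=None.)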
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc"), + ( + InstanceAdminAsyncClient, + transports.InstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_instance_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc"), + ( + InstanceAdminAsyncClient, + transports.InstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_instance_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_instance_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.InstanceAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = InstanceAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_instance_configs( + transport: str = "grpc", + request_type=spanner_instance_admin.ListInstanceConfigsRequest, +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
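+ # (e.g. ListInstanceConfigsRequest() with every field unset is still a valid + # message; unset scalar fields simply read back as their zero values.)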
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.ListInstanceConfigsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_instance_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstanceConfigsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListInstanceConfigsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_instance_configs_from_dict(): + test_list_instance_configs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_instance_configs_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.ListInstanceConfigsRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstanceConfigsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_instance_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstanceConfigsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListInstanceConfigsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_instance_configs_async_from_dict(): + await test_list_instance_configs_async(request_type=dict) + + +def test_list_instance_configs_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.ListInstanceConfigsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + call.return_value = spanner_instance_admin.ListInstanceConfigsResponse() + + client.list_instance_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
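+ # (GAPIC mirrors URI-bound request fields into the x-goog-request-params + # metadata entry so that the backend can route the call.)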
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_instance_configs_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.ListInstanceConfigsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstanceConfigsResponse() + ) + + await client.list_instance_configs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_instance_configs_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.ListInstanceConfigsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_instance_configs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_instance_configs_flattened_error(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instance_configs( + spanner_instance_admin.ListInstanceConfigsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_instance_configs_flattened_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.ListInstanceConfigsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstanceConfigsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_instance_configs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
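+ # (The flattened kwargs are folded into a single request proto, which + # reaches the mocked stub as its first positional argument.)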
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_instance_configs_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_instance_configs( + spanner_instance_admin.ListInstanceConfigsRequest(), parent="parent_value", + ) + + +def test_list_instance_configs_pager(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[], next_page_token="def", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[spanner_instance_admin.InstanceConfig(),], + next_page_token="ghi", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_instance_configs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, spanner_instance_admin.InstanceConfig) for i in results + ) + + +def test_list_instance_configs_pages(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[], next_page_token="def", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[spanner_instance_admin.InstanceConfig(),], + next_page_token="ghi", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + ), + RuntimeError, + ) + pages = list(client.list_instance_configs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_instance_configs_async_pager(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
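+ # (mock's side_effect yields one response per stub invocation; the trailing + # RuntimeError would surface if the pager ever read past the last page.)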
+ call.side_effect = ( + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[], next_page_token="def", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[spanner_instance_admin.InstanceConfig(),], + next_page_token="ghi", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_instance_configs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, spanner_instance_admin.InstanceConfig) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_instance_configs_async_pages(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instance_configs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[], next_page_token="def", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[spanner_instance_admin.InstanceConfig(),], + next_page_token="ghi", + ), + spanner_instance_admin.ListInstanceConfigsResponse( + instance_configs=[ + spanner_instance_admin.InstanceConfig(), + spanner_instance_admin.InstanceConfig(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_instance_configs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_instance_config( + transport: str = "grpc", + request_type=spanner_instance_admin.GetInstanceConfigRequest, +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.InstanceConfig( + name="name_value", display_name="display_name_value", + ) + + response = client.get_instance_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceConfigRequest() + + # Establish that the response is the type that we expect. 
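+ # (proto-plus messages expose their fields as plain Python attributes, so + # the scalar fields compare directly below.)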
+ + assert isinstance(response, spanner_instance_admin.InstanceConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +def test_get_instance_config_from_dict(): + test_get_instance_config(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_instance_config_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.GetInstanceConfigRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.InstanceConfig( + name="name_value", display_name="display_name_value", + ) + ) + + response = await client.get_instance_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceConfigRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner_instance_admin.InstanceConfig) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_get_instance_config_async_from_dict(): + await test_get_instance_config_async(request_type=dict) + + +def test_get_instance_config_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.GetInstanceConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + call.return_value = spanner_instance_admin.InstanceConfig() + + client.get_instance_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_instance_config_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.GetInstanceConfigRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.InstanceConfig() + ) + + await client.get_instance_config(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_instance_config_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.InstanceConfig() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_instance_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_instance_config_flattened_error(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance_config( + spanner_instance_admin.GetInstanceConfigRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_instance_config_flattened_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.InstanceConfig() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.InstanceConfig() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_instance_config(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_instance_config_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_instance_config( + spanner_instance_admin.GetInstanceConfigRequest(), name="name_value", + ) + + +def test_list_instances( + transport: str = "grpc", request_type=spanner_instance_admin.ListInstancesRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = spanner_instance_admin.ListInstancesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListInstancesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_instances_from_dict(): + test_list_instances(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_instances_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.ListInstancesRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstancesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListInstancesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_instances_async_from_dict(): + await test_list_instances_async(request_type=dict) + + +def test_list_instances_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.ListInstancesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = spanner_instance_admin.ListInstancesResponse() + + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_instances_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.ListInstancesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstancesResponse() + ) + + await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_instances_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.ListInstancesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_instances(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_instances_flattened_error(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + spanner_instance_admin.ListInstancesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_instances_flattened_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.ListInstancesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.ListInstancesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_instances(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_instances_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_instances( + spanner_instance_admin.ListInstancesRequest(), parent="parent_value", + ) + + +def test_list_instances_pager(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[], next_page_token="def", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[spanner_instance_admin.Instance(),], next_page_token="ghi", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_instances(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, spanner_instance_admin.Instance) for i in results) + + +def test_list_instances_pages(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[], next_page_token="def", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[spanner_instance_admin.Instance(),], next_page_token="ghi", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + ), + RuntimeError, + ) + pages = list(client.list_instances(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_instances_async_pager(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[], next_page_token="def", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[spanner_instance_admin.Instance(),], next_page_token="ghi", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_instances(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, spanner_instance_admin.Instance) for i in responses) + + +@pytest.mark.asyncio +async def test_list_instances_async_pages(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + next_page_token="abc", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[], next_page_token="def", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[spanner_instance_admin.Instance(),], next_page_token="ghi", + ), + spanner_instance_admin.ListInstancesResponse( + instances=[ + spanner_instance_admin.Instance(), + spanner_instance_admin.Instance(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_instances(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_instance( + transport: str = "grpc", request_type=spanner_instance_admin.GetInstanceRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.Instance( + name="name_value", + config="config_value", + display_name="display_name_value", + node_count=1070, + state=spanner_instance_admin.Instance.State.CREATING, + endpoint_uris=["endpoint_uris_value"], + ) + + response = client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, spanner_instance_admin.Instance) + + assert response.name == "name_value" + + assert response.config == "config_value" + + assert response.display_name == "display_name_value" + + assert response.node_count == 1070 + + assert response.state == spanner_instance_admin.Instance.State.CREATING + + assert response.endpoint_uris == ["endpoint_uris_value"] + + +def test_get_instance_from_dict(): + test_get_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_instance_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.GetInstanceRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.Instance( + name="name_value", + config="config_value", + display_name="display_name_value", + node_count=1070, + state=spanner_instance_admin.Instance.State.CREATING, + endpoint_uris=["endpoint_uris_value"], + ) + ) + + response = await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner_instance_admin.Instance) + + assert response.name == "name_value" + + assert response.config == "config_value" + + assert response.display_name == "display_name_value" + + assert response.node_count == 1070 + + assert response.state == spanner_instance_admin.Instance.State.CREATING + + assert response.endpoint_uris == ["endpoint_uris_value"] + + +@pytest.mark.asyncio +async def test_get_instance_async_from_dict(): + await test_get_instance_async(request_type=dict) + + +def test_get_instance_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = spanner_instance_admin.Instance() + + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_instance_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
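+ # (Here request.name is the URI-bound field, so it must appear in the + # name=name/value routing header asserted below.)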
+ request = spanner_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.Instance() + ) + + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_instance_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.Instance() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_instance_flattened_error(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance( + spanner_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_instance_flattened_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner_instance_admin.Instance() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_instance_admin.Instance() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_instance_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_instance( + spanner_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +def test_create_instance( + transport: str = "grpc", request_type=spanner_instance_admin.CreateInstanceRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_instance_from_dict(): + test_create_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_instance_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.CreateInstanceRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_instance_async_from_dict(): + await test_create_instance_async(request_type=dict) + + +def test_create_instance_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_instance_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.create_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_create_instance_flattened():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_instance(
+            parent="parent_value",
+            instance_id="instance_id_value",
+            instance=spanner_instance_admin.Instance(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].instance_id == "instance_id_value"
+
+        assert args[0].instance == spanner_instance_admin.Instance(name="name_value")
+
+
+def test_create_instance_flattened_error():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_instance(
+            spanner_instance_admin.CreateInstanceRequest(),
+            parent="parent_value",
+            instance_id="instance_id_value",
+            instance=spanner_instance_admin.Instance(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_instance_flattened_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_instance(
+            parent="parent_value",
+            instance_id="instance_id_value",
+            instance=spanner_instance_admin.Instance(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].instance_id == "instance_id_value"
+
+        assert args[0].instance == spanner_instance_admin.Instance(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_create_instance_flattened_error_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
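+    # (For example, a mixed invocation shaped like
+    # client.create_instance(CreateInstanceRequest(), parent="p") is rejected
+    # client-side before any RPC is attempted, which is exactly what the call
+    # below exercises.)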
+ with pytest.raises(ValueError): + await client.create_instance( + spanner_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=spanner_instance_admin.Instance(name="name_value"), + ) + + +def test_update_instance( + transport: str = "grpc", request_type=spanner_instance_admin.UpdateInstanceRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.UpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_instance_from_dict(): + test_update_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_instance_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.UpdateInstanceRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.UpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_instance_async_from_dict(): + await test_update_instance_async(request_type=dict) + + +def test_update_instance_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.UpdateInstanceRequest() + request.instance.name = "instance.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
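+    # (mock.call records unpack as (name, args, kwargs); the metadata kwarg
+    # carries the routing headers, which is why kw["metadata"] is inspected
+    # below.)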
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
+        "metadata"
+    ]
+
+
+@pytest.mark.asyncio
+async def test_update_instance_field_headers_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_instance_admin.UpdateInstanceRequest()
+    request.instance.name = "instance.name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/op")
+        )
+
+        await client.update_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
+        "metadata"
+    ]
+
+
+def test_update_instance_flattened():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_instance(
+            instance=spanner_instance_admin.Instance(name="name_value"),
+            field_mask=gp_field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].instance == spanner_instance_admin.Instance(name="name_value")
+
+        assert args[0].field_mask == gp_field_mask.FieldMask(paths=["paths_value"])
+
+
+def test_update_instance_flattened_error():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_instance(
+            spanner_instance_admin.UpdateInstanceRequest(),
+            instance=spanner_instance_admin.Instance(name="name_value"),
+            field_mask=gp_field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_instance_flattened_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
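+        # (Hedged aside: field_mask narrows which Instance fields the server
+        # would update, e.g. FieldMask(paths=["display_name"]) to touch only
+        # display_name; "paths_value" below is just a placeholder.)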
+ response = await client.update_instance( + instance=spanner_instance_admin.Instance(name="name_value"), + field_mask=gp_field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].instance == spanner_instance_admin.Instance(name="name_value") + + assert args[0].field_mask == gp_field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_instance_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_instance( + spanner_instance_admin.UpdateInstanceRequest(), + instance=spanner_instance_admin.Instance(name="name_value"), + field_mask=gp_field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_instance( + transport: str = "grpc", request_type=spanner_instance_admin.DeleteInstanceRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_instance_from_dict(): + test_delete_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_instance_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.DeleteInstanceRequest, +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_instance_async_from_dict(): + await test_delete_instance_async(request_type=dict) + + +def test_delete_instance_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
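+    # (Hedged aside: DeleteInstance routes on the request's name field, its
+    # URI template being of the form v1/{name=projects/*/instances/*}, so
+    # name is what must surface in the x-goog-request-params header asserted
+    # below.)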
+    request = spanner_instance_admin.DeleteInstanceRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
+        call.return_value = None
+
+        client.delete_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_instance_field_headers_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = spanner_instance_admin.DeleteInstanceRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        await client.delete_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_instance_flattened():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_instance(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_instance_flattened_error():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_instance(
+            spanner_instance_admin.DeleteInstanceRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_instance_flattened_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_instance(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
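+        # (The flattened name kwarg is folded into a DeleteInstanceRequest by
+        # the client, so the single positional argument recorded below carries
+        # it as args[0].name.)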
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_instance_flattened_error_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_instance( + spanner_instance_admin.DeleteInstanceRequest(), name="name_value", + ) + + +def test_set_iam_policy( + transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest +): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = InstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
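+    # (IAM methods route on the resource field rather than name or parent, so
+    # this test asserts a resource=... header; the mechanism is otherwise
+    # identical to the instance tests above.)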
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy.Policy(version=774), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_set_iam_policy_flattened_error(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
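+        # (FakeUnaryUnaryCall wraps the payload in an awaitable, mimicking a
+        # grpc.aio unary-unary invocation so the async client can await the
+        # stubbed call.)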
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_iam_policy(
+            iam_policy.SetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+def test_get_iam_policy(
+    transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest
+):
+    client = InstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy(version=774, etag=b"etag_blob",)
+
+        response = client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == iam_policy.GetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, policy.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_from_dict():
+    test_get_iam_policy(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(
+    transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest
+):
+    client = InstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy.Policy(version=774, etag=b"etag_blob",)
+        )
+
+        response = await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == iam_policy.GetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
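+    # (The version and etag checks below pin the exact fake payload, which
+    # would catch the client accidentally re-wrapping or dropping fields of
+    # the response.)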
+ assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+def test_get_iam_policy_flattened_error():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            iam_policy.GetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_error_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_iam_policy(
+            iam_policy.GetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+def test_test_iam_permissions(
+    transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest
+):
+    client = InstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy.TestIamPermissionsResponse(
+            permissions=["permissions_value"],
+        )
+
+        response = client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == iam_policy.TestIamPermissionsRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, iam_policy.TestIamPermissionsResponse)
+
+    assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_from_dict():
+    test_test_iam_permissions(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(
+    transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest
+):
+    client = InstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
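+        # (A plain dict is accepted wherever a request proto is expected; the
+        # client coerces it, so the request= dict passed below becomes a
+        # TestIamPermissionsRequest, which is the point of this *_foreign
+        # test.)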
+        call.return_value = iam_policy.TestIamPermissionsResponse()
+
+        response = client.test_iam_permissions(
+            request={
+                "resource": "resource_value",
+                "permissions": ["permissions_value"],
+            }
+        )
+        call.assert_called()
+
+
+def test_test_iam_permissions_flattened():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy.TestIamPermissionsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.test_iam_permissions(
+            resource="resource_value", permissions=["permissions_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+        assert args[0].permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_flattened_error():
+    client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.test_iam_permissions(
+            iam_policy.TestIamPermissionsRequest(),
+            resource="resource_value",
+            permissions=["permissions_value"],
+        )
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy.TestIamPermissionsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.test_iam_permissions(
+            resource="resource_value", permissions=["permissions_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+        assert args[0].permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_error_async():
+    client = InstanceAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.test_iam_permissions(
+            iam_policy.TestIamPermissionsRequest(),
+            resource="resource_value",
+            permissions=["permissions_value"],
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.InstanceAdminGrpcTransport(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = InstanceAdminClient(
+            credentials=credentials.AnonymousCredentials(), transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
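+    # (A transport instance already encapsulates its own credentials and
+    # channel, so any credential-shaped client option passed alongside one is
+    # ambiguous; all three cases in this test must raise ValueError.)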
+ transport = transports.InstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceAdminClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.InstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = InstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.InstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.InstanceAdminGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.InstanceAdminGrpcTransport, + transports.InstanceAdminGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = InstanceAdminClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.InstanceAdminGrpcTransport,) + + +def test_instance_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.InstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_instance_admin_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.InstanceAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.InstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
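+    # (The base transport is an abstract interface; only the concrete gRPC
+    # transports override these methods, so each name below must fail fast on
+    # the base class.)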
+ methods = ( + "list_instance_configs", + "get_instance_config", + "list_instances", + "get_instance", + "create_instance", + "update_instance", + "delete_instance", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_instance_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.InstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.InstanceAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + quota_project_id="octopus", + ) + + +def test_instance_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.InstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.InstanceAdminTransport() + adc.assert_called_once() + + +def test_instance_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + InstanceAdminClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + quota_project_id=None, + ) + + +def test_instance_admin_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.InstanceAdminGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + quota_project_id="octopus", + ) + + +def test_instance_admin_host_no_port(): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="spanner.googleapis.com" + ), + ) + assert client.transport._host == "spanner.googleapis.com:443" + + +def test_instance_admin_host_with_port(): + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="spanner.googleapis.com:8000" + ), + ) + assert client.transport._host == "spanner.googleapis.com:8000" + + +def test_instance_admin_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
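+    # (When an explicit channel is supplied, the transport must adopt it
+    # verbatim instead of building one from host and credentials; the host
+    # string is still normalized with the default :443 port, as asserted
+    # below.)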
+    transport = transports.InstanceAdminGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+
+
+def test_instance_admin_grpc_asyncio_transport_channel():
+    channel = aio.insecure_channel("http://localhost/")
+
+    # Check that channel is used if provided.
+    transport = transports.InstanceAdminGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.InstanceAdminGrpcTransport,
+        transports.InstanceAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_instance_admin_transport_channel_mtls_with_client_cert_source(transport_class):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/spanner.admin",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.InstanceAdminGrpcTransport,
+        transports.InstanceAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_instance_admin_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/spanner.admin",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_instance_admin_grpc_lro_client():
+    client = InstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_instance_admin_grpc_lro_async_client():
+    client = InstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_instance_path():
+    project = "squid"
+    instance = "clam"
+
+    expected = "projects/{project}/instances/{instance}".format(
+        project=project, instance=instance,
+    )
+    actual = InstanceAdminClient.instance_path(project, instance)
+    assert expected == actual
+
+
+def test_parse_instance_path():
+    expected = {
+        "project": "whelk",
+        "instance": "octopus",
+    }
+    path = InstanceAdminClient.instance_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = InstanceAdminClient.parse_instance_path(path)
+    assert expected == actual
+
+
+def test_instance_config_path():
+    project = "oyster"
+    instance_config = "nudibranch"
+
+    expected = "projects/{project}/instanceConfigs/{instance_config}".format(
+        project=project, instance_config=instance_config,
+    )
+    actual = InstanceAdminClient.instance_config_path(project, instance_config)
+    assert expected == actual
+
+
+def test_parse_instance_config_path():
+    expected = {
+        "project": "cuttlefish",
+        "instance_config": "mussel",
+    }
+    path = InstanceAdminClient.instance_config_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = InstanceAdminClient.parse_instance_config_path(path)
+    assert expected == actual
+
+
+def test_common_billing_account_path():
+    billing_account = "winkle"
+
+    expected = "billingAccounts/{billing_account}".format(
+        billing_account=billing_account,
+    )
+    actual = InstanceAdminClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "nautilus",
+    }
+    path = InstanceAdminClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = InstanceAdminClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+
+def test_common_folder_path():
+    folder = "scallop"
+
+    expected = "folders/{folder}".format(folder=folder,)
+    actual = InstanceAdminClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "abalone",
+    }
+    path = InstanceAdminClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = InstanceAdminClient.parse_common_folder_path(path)
+    assert expected == actual
+
+
+def test_common_organization_path():
+    organization = "squid"
+
+    expected = "organizations/{organization}".format(organization=organization,)
+    actual = InstanceAdminClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "clam",
+    }
+    path = InstanceAdminClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = InstanceAdminClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + + expected = "projects/{project}".format(project=project,) + actual = InstanceAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = InstanceAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceAdminClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = InstanceAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = InstanceAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.InstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.InstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = InstanceAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/spanner_v1/__init__.py b/tests/unit/gapic/spanner_v1/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/unit/gapic/spanner_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py new file mode 100644 index 0000000000..d891f27d94 --- /dev/null +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -0,0 +1,3462 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from google import auth
+from google.api_core import client_options
+from google.api_core import exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.auth import credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.spanner_v1.services.spanner import SpannerAsyncClient
+from google.cloud.spanner_v1.services.spanner import SpannerClient
+from google.cloud.spanner_v1.services.spanner import pagers
+from google.cloud.spanner_v1.services.spanner import transports
+from google.cloud.spanner_v1.types import keys
+from google.cloud.spanner_v1.types import mutation
+from google.cloud.spanner_v1.types import result_set
+from google.cloud.spanner_v1.types import spanner
+from google.cloud.spanner_v1.types import transaction
+from google.cloud.spanner_v1.types import type as gs_type
+from google.oauth2 import service_account
+from google.protobuf import duration_pb2 as duration  # type: ignore
+from google.protobuf import struct_pb2 as struct  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+from google.rpc import status_pb2 as status  # type: ignore
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return (
+        "foo.googleapis.com"
+        if ("localhost" in client.DEFAULT_ENDPOINT)
+        else client.DEFAULT_ENDPOINT
+    )
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert SpannerClient._get_default_mtls_endpoint(None) is None
+    assert SpannerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert (
+        SpannerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    )
+    assert (
+        SpannerClient._get_default_mtls_endpoint(sandbox_endpoint)
+        == sandbox_mtls_endpoint
+    )
+    assert (
+        SpannerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+        == sandbox_mtls_endpoint
+    )
+    assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient])
+def test_spanner_client_from_service_account_file(client_class):
+    creds = credentials.AnonymousCredentials()
+    with mock.patch.object(
+        service_account.Credentials, "from_service_account_file"
+    ) as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client.transport._credentials == creds
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client.transport._credentials == creds
+
+        assert client.transport._host == "spanner.googleapis.com:443"
+
+
+def test_spanner_client_get_transport_class():
+    transport = SpannerClient.get_transport_class()
+    assert transport == transports.SpannerGrpcTransport
+
+    transport = SpannerClient.get_transport_class("grpc")
+    assert transport == transports.SpannerGrpcTransport
== transports.SpannerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpannerClient, transports.SpannerGrpcTransport, "grpc"), + (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient) +) +@mock.patch.object( + SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient) +) +def test_spanner_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpannerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpannerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
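+ # Only "true" and "false" are valid values for this variable; anything else should fail client construction with ValueError, as asserted below.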
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (SpannerClient, transports.SpannerGrpcTransport, "grpc", "true"), + ( + SpannerAsyncClient, + transports.SpannerGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (SpannerClient, transports.SpannerGrpcTransport, "grpc", "false"), + ( + SpannerAsyncClient, + transports.SpannerGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient) +) +@mock.patch.object( + SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient) +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_spanner_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
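+ # Here the certificate comes from application default credentials, simulated by mocking google.auth.transport.grpc.SslCredentials below instead of passing a client_cert_source.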
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpannerClient, transports.SpannerGrpcTransport, "grpc"), + (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_spanner_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpannerClient, transports.SpannerGrpcTransport, "grpc"), + (SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_spanner_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_spanner_client_client_options_from_dict(): + with mock.patch( + "google.cloud.spanner_v1.services.spanner.transports.SpannerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = SpannerClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_session( + transport: str = "grpc", request_type=spanner.CreateSessionRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.Session(name="name_value",) + + response = client.create_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CreateSessionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, spanner.Session) + + assert response.name == "name_value" + + +def test_create_session_from_dict(): + test_create_session(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_session_async( + transport: str = "grpc_asyncio", request_type=spanner.CreateSessionRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.Session(name="name_value",) + ) + + response = await client.create_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CreateSessionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, spanner.Session) + + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_create_session_async_from_dict(): + await test_create_session_async(request_type=dict) + + +def test_create_session_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.CreateSessionRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + call.return_value = spanner.Session() + + client.create_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_session_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.CreateSessionRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session()) + + await client.create_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +def test_create_session_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.Session() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_session(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +def test_create_session_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_session( + spanner.CreateSessionRequest(), database="database_value", + ) + + +@pytest.mark.asyncio +async def test_create_session_flattened_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_session(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +@pytest.mark.asyncio +async def test_create_session_flattened_error_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_session( + spanner.CreateSessionRequest(), database="database_value", + ) + + +def test_batch_create_sessions( + transport: str = "grpc", request_type=spanner.BatchCreateSessionsRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.BatchCreateSessionsResponse() + + response = client.batch_create_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BatchCreateSessionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, spanner.BatchCreateSessionsResponse) + + +def test_batch_create_sessions_from_dict(): + test_batch_create_sessions(request_type=dict) + + +@pytest.mark.asyncio +async def test_batch_create_sessions_async( + transport: str = "grpc_asyncio", request_type=spanner.BatchCreateSessionsRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.BatchCreateSessionsResponse() + ) + + response = await client.batch_create_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BatchCreateSessionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.BatchCreateSessionsResponse) + + +@pytest.mark.asyncio +async def test_batch_create_sessions_async_from_dict(): + await test_batch_create_sessions_async(request_type=dict) + + +def test_batch_create_sessions_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header.
Set these to a non-empty value. + request = spanner.BatchCreateSessionsRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + call.return_value = spanner.BatchCreateSessionsResponse() + + client.batch_create_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_create_sessions_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BatchCreateSessionsRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.BatchCreateSessionsResponse() + ) + + await client.batch_create_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +def test_batch_create_sessions_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.BatchCreateSessionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_sessions( + database="database_value", session_count=1420, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + assert args[0].session_count == 1420 + + +def test_batch_create_sessions_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_sessions( + spanner.BatchCreateSessionsRequest(), + database="database_value", + session_count=1420, + ) + + +@pytest.mark.asyncio +async def test_batch_create_sessions_flattened_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.BatchCreateSessionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_sessions( + database="database_value", session_count=1420, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + assert args[0].session_count == 1420 + + +@pytest.mark.asyncio +async def test_batch_create_sessions_flattened_error_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_sessions( + spanner.BatchCreateSessionsRequest(), + database="database_value", + session_count=1420, + ) + + +def test_get_session(transport: str = "grpc", request_type=spanner.GetSessionRequest): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.Session(name="name_value",) + + response = client.get_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.GetSessionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, spanner.Session) + + assert response.name == "name_value" + + +def test_get_session_from_dict(): + test_get_session(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_session_async( + transport: str = "grpc_asyncio", request_type=spanner.GetSessionRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.Session(name="name_value",) + ) + + response = await client.get_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.GetSessionRequest() + + # Establish that the response is the type that we expect.
+ assert isinstance(response, spanner.Session) + + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_get_session_async_from_dict(): + await test_get_session_async(request_type=dict) + + +def test_get_session_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.GetSessionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + call.return_value = spanner.Session() + + client.get_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_session_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.GetSessionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session()) + + await client.get_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_session_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.Session() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_session(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_session_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_session( + spanner.GetSessionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_session_flattened_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_session(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_session_flattened_error_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_session( + spanner.GetSessionRequest(), name="name_value", + ) + + +def test_list_sessions( + transport: str = "grpc", request_type=spanner.ListSessionsRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.ListSessionsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ListSessionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListSessionsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_sessions_from_dict(): + test_list_sessions(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_sessions_async( + transport: str = "grpc_asyncio", request_type=spanner.ListSessionsRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.ListSessionsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ListSessionsRequest() + + # Establish that the response is the type that we expect.
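+ # The client wraps the raw response in a pager, which fetches additional pages lazily as it is iterated.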
+ assert isinstance(response, pagers.ListSessionsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_sessions_async_from_dict(): + await test_list_sessions_async(request_type=dict) + + +def test_list_sessions_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ListSessionsRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + call.return_value = spanner.ListSessionsResponse() + + client.list_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_sessions_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ListSessionsRequest() + request.database = "database/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.ListSessionsResponse() + ) + + await client.list_sessions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "database=database/value",) in kw["metadata"] + + +def test_list_sessions_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.ListSessionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_sessions(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +def test_list_sessions_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_sessions( + spanner.ListSessionsRequest(), database="database_value", + ) + + +@pytest.mark.asyncio +async def test_list_sessions_flattened_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.ListSessionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_sessions(database="database_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].database == "database_value" + + +@pytest.mark.asyncio +async def test_list_sessions_flattened_error_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_sessions( + spanner.ListSessionsRequest(), database="database_value", + ) + + +def test_list_sessions_pager(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(), spanner.Session(),], + next_page_token="abc", + ), + spanner.ListSessionsResponse(sessions=[], next_page_token="def",), + spanner.ListSessionsResponse( + sessions=[spanner.Session(),], next_page_token="ghi", + ), + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", ""),)), + ) + pager = client.list_sessions(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, spanner.Session) for i in results) + + +def test_list_sessions_pages(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(), spanner.Session(),], + next_page_token="abc", + ), + spanner.ListSessionsResponse(sessions=[], next_page_token="def",), + spanner.ListSessionsResponse( + sessions=[spanner.Session(),], next_page_token="ghi", + ), + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(),], + ), + RuntimeError, + ) + pages = list(client.list_sessions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_sessions_async_pager(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_sessions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages.
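+ # Pages of 3, 0, 1 and 2 sessions (6 in total); the trailing RuntimeError fails the test if a fifth page were ever requested.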
+ call.side_effect = ( + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(), spanner.Session(),], + next_page_token="abc", + ), + spanner.ListSessionsResponse(sessions=[], next_page_token="def",), + spanner.ListSessionsResponse( + sessions=[spanner.Session(),], next_page_token="ghi", + ), + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(),], + ), + RuntimeError, + ) + async_pager = await client.list_sessions(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, spanner.Session) for i in responses) + + +@pytest.mark.asyncio +async def test_list_sessions_async_pages(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_sessions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(), spanner.Session(),], + next_page_token="abc", + ), + spanner.ListSessionsResponse(sessions=[], next_page_token="def",), + spanner.ListSessionsResponse( + sessions=[spanner.Session(),], next_page_token="ghi", + ), + spanner.ListSessionsResponse( + sessions=[spanner.Session(), spanner.Session(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_sessions(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_session( + transport: str = "grpc", request_type=spanner.DeleteSessionRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.DeleteSessionRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_session_from_dict(): + test_delete_session(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_session_async( + transport: str = "grpc_asyncio", request_type=spanner.DeleteSessionRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + # Designate an appropriate return value for the call.
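+ # DeleteSession returns google.protobuf.Empty, which the client surfaces as None.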
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.DeleteSessionRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_session_async_from_dict(): + await test_delete_session_async(request_type=dict) + + +def test_delete_session_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.DeleteSessionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + call.return_value = None + + client.delete_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_session_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.DeleteSessionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_session(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_session_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_session(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_session_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
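+ # Mixing a request object with flattened keyword arguments is ambiguous, so the client refuses it outright.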
+ with pytest.raises(ValueError): + client.delete_session( + spanner.DeleteSessionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_session_flattened_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_session(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_session_flattened_error_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_session( + spanner.DeleteSessionRequest(), name="name_value", + ) + + +def test_execute_sql(transport: str = "grpc", request_type=spanner.ExecuteSqlRequest): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = result_set.ResultSet() + + response = client.execute_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, result_set.ResultSet) + + +def test_execute_sql_from_dict(): + test_execute_sql(request_type=dict) + + +@pytest.mark.asyncio +async def test_execute_sql_async( + transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + result_set.ResultSet() + ) + + response = await client.execute_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + # Establish that the response is the type that we expect.
+ assert isinstance(response, result_set.ResultSet) + + +@pytest.mark.asyncio +async def test_execute_sql_async_from_dict(): + await test_execute_sql_async(request_type=dict) + + +def test_execute_sql_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteSqlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + call.return_value = result_set.ResultSet() + + client.execute_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_execute_sql_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteSqlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + result_set.ResultSet() + ) + + await client.execute_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_execute_streaming_sql( + transport: str = "grpc", request_type=spanner.ExecuteSqlRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([result_set.PartialResultSet()]) + + response = client.execute_streaming_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, result_set.PartialResultSet) + + +def test_execute_streaming_sql_from_dict(): + test_execute_streaming_sql(request_type=dict) + + +@pytest.mark.asyncio +async def test_execute_streaming_sql_async( + transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[result_set.PartialResultSet()] + ) + + response = await client.execute_streaming_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, result_set.PartialResultSet) + + +@pytest.mark.asyncio +async def test_execute_streaming_sql_async_from_dict(): + await test_execute_streaming_sql_async(request_type=dict) + + +def test_execute_streaming_sql_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteSqlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + call.return_value = iter([result_set.PartialResultSet()]) + + client.execute_streaming_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_execute_streaming_sql_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteSqlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[result_set.PartialResultSet()] + ) + + await client.execute_streaming_sql(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_execute_batch_dml( + transport: str = "grpc", request_type=spanner.ExecuteBatchDmlRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = spanner.ExecuteBatchDmlResponse() + + response = client.execute_batch_dml(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteBatchDmlRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, spanner.ExecuteBatchDmlResponse) + + +def test_execute_batch_dml_from_dict(): + test_execute_batch_dml(request_type=dict) + + +@pytest.mark.asyncio +async def test_execute_batch_dml_async( + transport: str = "grpc_asyncio", request_type=spanner.ExecuteBatchDmlRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.ExecuteBatchDmlResponse() + ) + + response = await client.execute_batch_dml(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteBatchDmlRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.ExecuteBatchDmlResponse) + + +@pytest.mark.asyncio +async def test_execute_batch_dml_async_from_dict(): + await test_execute_batch_dml_async(request_type=dict) + + +def test_execute_batch_dml_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteBatchDmlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + call.return_value = spanner.ExecuteBatchDmlResponse() + + client.execute_batch_dml(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_execute_batch_dml_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ExecuteBatchDmlRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.ExecuteBatchDmlResponse() + ) + + await client.execute_batch_dml(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
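+ # Routing headers travel in the "metadata" kwarg passed to the mocked stub call.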
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_read(transport: str = "grpc", request_type=spanner.ReadRequest): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = result_set.ResultSet() + + response = client.read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, result_set.ResultSet) + + +def test_read_from_dict(): + test_read(request_type=dict) + + +@pytest.mark.asyncio +async def test_read_async( + transport: str = "grpc_asyncio", request_type=spanner.ReadRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + result_set.ResultSet() + ) + + response = await client.read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, result_set.ResultSet) + + +@pytest.mark.asyncio +async def test_read_async_from_dict(): + await test_read_async(request_type=dict) + + +def test_read_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read), "__call__") as call: + call.return_value = result_set.ResultSet() + + client.read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
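+    # NOTE: Python resolves the call operator on the class, not the instance,
+    # so the test patches __call__ on type(client.transport.read); patching an
+    # instance attribute named "__call__" would never be hit when the
+    # multicallable is invoked.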
+ with mock.patch.object(type(client.transport.read), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + result_set.ResultSet() + ) + + await client.read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_streaming_read(transport: str = "grpc", request_type=spanner.ReadRequest): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([result_set.PartialResultSet()]) + + response = client.streaming_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, result_set.PartialResultSet) + + +def test_streaming_read_from_dict(): + test_streaming_read(request_type=dict) + + +@pytest.mark.asyncio +async def test_streaming_read_async( + transport: str = "grpc_asyncio", request_type=spanner.ReadRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[result_set.PartialResultSet()] + ) + + response = await client.streaming_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, result_set.PartialResultSet) + + +@pytest.mark.asyncio +async def test_streaming_read_async_from_dict(): + await test_streaming_read_async(request_type=dict) + + +def test_streaming_read_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + call.return_value = iter([result_set.PartialResultSet()]) + + client.streaming_read(request) + + # Establish that the underlying gRPC stub method was called. 
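+        # NOTE: for server-streaming RPCs the sync stub is faked with a plain
+        # iterator of PartialResultSet messages, so the outgoing request is
+        # observed through the mock's recorded calls; the stream itself is
+        # consumed lazily by whoever iterates the response.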
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_streaming_read_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.ReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[result_set.PartialResultSet()] + ) + + await client.streaming_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_begin_transaction( + transport: str = "grpc", request_type=spanner.BeginTransactionRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = transaction.Transaction(id=b"id_blob",) + + response = client.begin_transaction(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BeginTransactionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, transaction.Transaction) + + assert response.id == b"id_blob" + + +def test_begin_transaction_from_dict(): + test_begin_transaction(request_type=dict) + + +@pytest.mark.asyncio +async def test_begin_transaction_async( + transport: str = "grpc_asyncio", request_type=spanner.BeginTransactionRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + transaction.Transaction(id=b"id_blob",) + ) + + response = await client.begin_transaction(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BeginTransactionRequest() + + # Establish that the response is the type that we expect. 
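+    # NOTE: FakeUnaryUnaryCall mimics a grpc.aio call object, so awaiting the
+    # client method resolves straight to the wrapped Transaction proto that is
+    # asserted on here.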
+ assert isinstance(response, transaction.Transaction) + + assert response.id == b"id_blob" + + +@pytest.mark.asyncio +async def test_begin_transaction_async_from_dict(): + await test_begin_transaction_async(request_type=dict) + + +def test_begin_transaction_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BeginTransactionRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + call.return_value = transaction.Transaction() + + client.begin_transaction(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_begin_transaction_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BeginTransactionRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + transaction.Transaction() + ) + + await client.begin_transaction(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_begin_transaction_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = transaction.Transaction() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.begin_transaction( + session="session_value", + options=transaction.TransactionOptions(read_write=None), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].session == "session_value" + + assert args[0].options == transaction.TransactionOptions(read_write=None) + + +def test_begin_transaction_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
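+    # NOTE: GAPIC methods accept either a fully-formed request proto or the
+    # individual flattened fields, never both; mixing them is ambiguous, so
+    # the generated surface raises ValueError before any RPC is attempted,
+    # which is why no transport mock is needed in the error tests.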
+    with pytest.raises(ValueError):
+        client.begin_transaction(
+            spanner.BeginTransactionRequest(),
+            session="session_value",
+            options=transaction.TransactionOptions(read_write=None),
+        )
+
+
+@pytest.mark.asyncio
+async def test_begin_transaction_flattened_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.begin_transaction), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            transaction.Transaction()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.begin_transaction(
+            session="session_value",
+            options=transaction.TransactionOptions(read_write=None),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].session == "session_value"
+
+        assert args[0].options == transaction.TransactionOptions(read_write=None)
+
+
+@pytest.mark.asyncio
+async def test_begin_transaction_flattened_error_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.begin_transaction(
+            spanner.BeginTransactionRequest(),
+            session="session_value",
+            options=transaction.TransactionOptions(read_write=None),
+        )
+
+
+def test_commit(transport: str = "grpc", request_type=spanner.CommitRequest):
+    client = SpannerClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.commit), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = spanner.CommitResponse()
+
+        response = client.commit(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner.CommitRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, spanner.CommitResponse)
+
+
+def test_commit_from_dict():
+    test_commit(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_commit_async(
+    transport: str = "grpc_asyncio", request_type=spanner.CommitRequest
+):
+    client = SpannerAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.commit), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner.CommitResponse()
+        )
+
+        response = await client.commit(request)
+
+        # Establish that the underlying gRPC stub method was called.
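+        # NOTE: unlike the sync variant, which insists on exactly one recorded
+        # call, the async tests only assert that len(call.mock_calls) is
+        # truthy; the first recorded call is still the one carrying the
+        # request proto.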
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CommitRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.CommitResponse) + + +@pytest.mark.asyncio +async def test_commit_async_from_dict(): + await test_commit_async(request_type=dict) + + +def test_commit_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.CommitRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.commit), "__call__") as call: + call.return_value = spanner.CommitResponse() + + client.commit(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_commit_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.CommitRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.commit), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.CommitResponse() + ) + + await client.commit(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_commit_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.commit), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.CommitResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.commit( + session="session_value", + transaction_id=b"transaction_id_blob", + mutations=[ + mutation.Mutation(insert=mutation.Mutation.Write(table="table_value")) + ], + single_use_transaction=transaction.TransactionOptions(read_write=None), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].session == "session_value" + + assert args[0].mutations == [ + mutation.Mutation(insert=mutation.Mutation.Write(table="table_value")) + ] + + assert args[0].single_use_transaction == transaction.TransactionOptions( + read_write=None + ) + + +def test_commit_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.commit(
+            spanner.CommitRequest(),
+            session="session_value",
+            transaction_id=b"transaction_id_blob",
+            mutations=[
+                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
+            ],
+            single_use_transaction=transaction.TransactionOptions(read_write=None),
+        )
+
+
+@pytest.mark.asyncio
+async def test_commit_flattened_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.commit), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner.CommitResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.commit(
+            session="session_value",
+            transaction_id=b"transaction_id_blob",
+            mutations=[
+                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
+            ],
+            single_use_transaction=transaction.TransactionOptions(read_write=None),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].session == "session_value"
+
+        assert args[0].mutations == [
+            mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
+        ]
+
+        assert args[0].single_use_transaction == transaction.TransactionOptions(
+            read_write=None
+        )
+
+
+@pytest.mark.asyncio
+async def test_commit_flattened_error_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.commit(
+            spanner.CommitRequest(),
+            session="session_value",
+            transaction_id=b"transaction_id_blob",
+            mutations=[
+                mutation.Mutation(insert=mutation.Mutation.Write(table="table_value"))
+            ],
+            single_use_transaction=transaction.TransactionOptions(read_write=None),
+        )
+
+
+def test_rollback(transport: str = "grpc", request_type=spanner.RollbackRequest):
+    client = SpannerClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        response = client.rollback(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner.RollbackRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_rollback_from_dict():
+    test_rollback(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_rollback_async(
+    transport: str = "grpc_asyncio", request_type=spanner.RollbackRequest
+):
+    client = SpannerAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
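+    # NOTE: proto3 gives every field a default (empty string, zero, unset
+    # message), so request_type() with no arguments is a valid RollbackRequest;
+    # request.session starts out as "" and request.transaction_id as b"".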
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rollback), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.rollback(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.RollbackRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_rollback_async_from_dict(): + await test_rollback_async(request_type=dict) + + +def test_rollback_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.RollbackRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rollback), "__call__") as call: + call.return_value = None + + client.rollback(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_rollback_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.RollbackRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rollback), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.rollback(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_rollback_flattened(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rollback), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.rollback( + session="session_value", transaction_id=b"transaction_id_blob", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].session == "session_value" + + assert args[0].transaction_id == b"transaction_id_blob" + + +def test_rollback_flattened_error(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.rollback(
+            spanner.RollbackRequest(),
+            session="session_value",
+            transaction_id=b"transaction_id_blob",
+        )
+
+
+@pytest.mark.asyncio
+async def test_rollback_flattened_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.rollback(
+            session="session_value", transaction_id=b"transaction_id_blob",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].session == "session_value"
+
+        assert args[0].transaction_id == b"transaction_id_blob"
+
+
+@pytest.mark.asyncio
+async def test_rollback_flattened_error_async():
+    client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.rollback(
+            spanner.RollbackRequest(),
+            session="session_value",
+            transaction_id=b"transaction_id_blob",
+        )
+
+
+def test_partition_query(
+    transport: str = "grpc", request_type=spanner.PartitionQueryRequest
+):
+    client = SpannerClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = spanner.PartitionResponse()
+
+        response = client.partition_query(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == spanner.PartitionQueryRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, spanner.PartitionResponse)
+
+
+def test_partition_query_from_dict():
+    test_partition_query(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_partition_query_async(
+    transport: str = "grpc_asyncio", request_type=spanner.PartitionQueryRequest
+):
+    client = SpannerAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.partition_query), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            spanner.PartitionResponse()
+        )
+
+        response = await client.partition_query(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionQueryRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.PartitionResponse) + + +@pytest.mark.asyncio +async def test_partition_query_async_from_dict(): + await test_partition_query_async(request_type=dict) + + +def test_partition_query_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.PartitionQueryRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_query), "__call__") as call: + call.return_value = spanner.PartitionResponse() + + client.partition_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_partition_query_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.PartitionQueryRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_query), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.PartitionResponse() + ) + + await client.partition_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_partition_read( + transport: str = "grpc", request_type=spanner.PartitionReadRequest +): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = spanner.PartitionResponse() + + response = client.partition_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionReadRequest() + + # Establish that the response is the type that we expect. 
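+    # NOTE: PartitionQuery and PartitionRead share spanner.PartitionResponse,
+    # whose partition tokens are later fed to ExecuteStreamingSql or
+    # StreamingRead, which is why the two methods' tests are near-identical.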
+ + assert isinstance(response, spanner.PartitionResponse) + + +def test_partition_read_from_dict(): + test_partition_read(request_type=dict) + + +@pytest.mark.asyncio +async def test_partition_read_async( + transport: str = "grpc_asyncio", request_type=spanner.PartitionReadRequest +): + client = SpannerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_read), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.PartitionResponse() + ) + + response = await client.partition_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionReadRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.PartitionResponse) + + +@pytest.mark.asyncio +async def test_partition_read_async_from_dict(): + await test_partition_read_async(request_type=dict) + + +def test_partition_read_field_headers(): + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.PartitionReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_read), "__call__") as call: + call.return_value = spanner.PartitionResponse() + + client.partition_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_partition_read_field_headers_async(): + client = SpannerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.PartitionReadRequest() + request.session = "session/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_read), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner.PartitionResponse() + ) + + await client.partition_read(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "session=session/value",) in kw["metadata"] + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
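+    # NOTE: a transport instance already owns its channel and credentials, so
+    # also passing credentials, a credentials file, or scopes to the client
+    # would silently conflict; each combination below must raise ValueError.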
+ transport = transports.SpannerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpannerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpannerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SpannerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpannerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpannerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = SpannerClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpannerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpannerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpannerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.SpannerGrpcTransport,) + + +def test_spanner_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.SpannerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_spanner_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.spanner_v1.services.spanner.transports.SpannerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.SpannerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
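+    # NOTE: SpannerTransport is the abstract base; concrete transports (grpc,
+    # grpc_asyncio) override each RPC property with a real multicallable, so
+    # every name below must fail loudly on the base class rather than no-op.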
+ methods = ( + "create_session", + "batch_create_sessions", + "get_session", + "list_sessions", + "delete_session", + "execute_sql", + "execute_streaming_sql", + "execute_batch_dml", + "read", + "streaming_read", + "begin_transaction", + "commit", + "rollback", + "partition_query", + "partition_read", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_spanner_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpannerTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ), + quota_project_id="octopus", + ) + + +def test_spanner_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpannerTransport() + adc.assert_called_once() + + +def test_spanner_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + SpannerClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ), + quota_project_id=None, + ) + + +def test_spanner_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.SpannerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ), + quota_project_id="octopus", + ) + + +def test_spanner_host_no_port(): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="spanner.googleapis.com" + ), + ) + assert client.transport._host == "spanner.googleapis.com:443" + + +def test_spanner_host_with_port(): + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="spanner.googleapis.com:8000" + ), + ) + assert client.transport._host == "spanner.googleapis.com:8000" + + +def test_spanner_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
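+    # NOTE: when a pre-built channel is injected, the transport adopts it
+    # as-is instead of creating one from host and credentials; the host string
+    # is still normalized with the default :443 port, as asserted below.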
+ transport = transports.SpannerGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_spanner_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.SpannerGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport], +) +def test_spanner_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport], +) +def test_spanner_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_database_path(): + project = "squid" + instance = "clam" + database = "whelk" + + expected = "projects/{project}/instances/{instance}/databases/{database}".format( + project=project, instance=instance, database=database, + ) + actual = SpannerClient.database_path(project, instance, database) + assert expected == actual + + +def test_parse_database_path(): + expected = { + 
"project": "octopus", + "instance": "oyster", + "database": "nudibranch", + } + path = SpannerClient.database_path(**expected) + + # Check that the path construction is reversible. + actual = SpannerClient.parse_database_path(path) + assert expected == actual + + +def test_session_path(): + project = "cuttlefish" + instance = "mussel" + database = "winkle" + session = "nautilus" + + expected = "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}".format( + project=project, instance=instance, database=database, session=session, + ) + actual = SpannerClient.session_path(project, instance, database, session) + assert expected == actual + + +def test_parse_session_path(): + expected = { + "project": "scallop", + "instance": "abalone", + "database": "squid", + "session": "clam", + } + path = SpannerClient.session_path(**expected) + + # Check that the path construction is reversible. + actual = SpannerClient.parse_session_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = SpannerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = SpannerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SpannerClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + + expected = "folders/{folder}".format(folder=folder,) + actual = SpannerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = SpannerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SpannerClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + + expected = "organizations/{organization}".format(organization=organization,) + actual = SpannerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = SpannerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SpannerClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + + expected = "projects/{project}".format(project=project,) + actual = SpannerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = SpannerClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+    actual = SpannerClient.parse_common_project_path(path)
+    assert expected == actual
+
+
+def test_common_location_path():
+    project = "scallop"
+    location = "abalone"
+
+    expected = "projects/{project}/locations/{location}".format(
+        project=project, location=location,
+    )
+    actual = SpannerClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "squid",
+        "location": "clam",
+    }
+    path = SpannerClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SpannerClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(
+        transports.SpannerTransport, "_prep_wrapped_messages"
+    ) as prep:
+        client = SpannerClient(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(
+        transports.SpannerTransport, "_prep_wrapped_messages"
+    ) as prep:
+        transport_class = SpannerClient.get_transport_class()
+        transport = transport_class(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/v1/test_database_admin_client_v1.py b/tests/unit/gapic/v1/test_database_admin_client_v1.py
deleted file mode 100644
index baab7eb7ad..0000000000
--- a/tests/unit/gapic/v1/test_database_admin_client_v1.py
+++ /dev/null
@@ -1,842 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import spanner_admin_database_v1 -from google.cloud.spanner_admin_database_v1.proto import backup_pb2 -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestDatabaseAdminClient(object): - def test_create_database(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = spanner_database_admin_pb2.Database(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_database", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - create_statement = "createStatement552974828" - - response = client.create_database(parent, create_statement) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.CreateDatabaseRequest( - parent=parent, create_statement=create_statement - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_database_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_database_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - create_statement = "createStatement552974828" - - response = client.create_database(parent, create_statement) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_database_ddl(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - 
name="operations/test_update_database_ddl", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - statements = [] - - response = client.update_database_ddl(database, statements) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.UpdateDatabaseDdlRequest( - database=database, statements=statements - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_database_ddl_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_database_ddl_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - statements = [] - - response = client.update_database_ddl(database, statements) - exception = response.exception() - assert exception.errors[0] == error - - def test_create_backup(self): - # Setup Expected Response - database = "database1789464955" - name = "name3373707" - size_bytes = 1796325715 - expected_response = { - "database": database, - "name": name, - "size_bytes": size_bytes, - } - expected_response = backup_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = backup_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - exception = 
response.exception() - assert exception.errors[0] == error - - def test_restore_database(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = spanner_database_admin_pb2.Database(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_restore_database", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - database_id = "databaseId816491103" - - response = client.restore_database(parent, database_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.RestoreDatabaseRequest( - parent=parent, database_id=database_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_restore_database_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_database_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - database_id = "databaseId816491103" - - response = client.restore_database(parent, database_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_databases(self): - # Setup Expected Response - next_page_token = "" - databases_element = {} - databases = [databases_element] - expected_response = {"next_page_token": next_page_token, "databases": databases} - expected_response = spanner_database_admin_pb2.ListDatabasesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_databases(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.databases[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.ListDatabasesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_databases_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_databases(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_database(self): - # Setup 
Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = spanner_database_admin_pb2.Database(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - name = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.get_database(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.GetDatabaseRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_database_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - name = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.get_database(name) - - def test_drop_database(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - client.drop_database(database) - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.DropDatabaseRequest( - database=database - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_database_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.drop_database(database) - - def test_get_database_ddl(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_database_admin_pb2.GetDatabaseDdlResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.get_database_ddl(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.GetDatabaseDdlRequest( - database=database - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_database_ddl_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.get_database_ddl(database) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 
- expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_get_backup(self): - # Setup Expected Response - database = "database1789464955" - name_2 = "name2-1052831874" - size_bytes = 1796325715 - expected_response = { - "database": database, - "name": name_2, - "size_bytes": size_bytes, - } - expected_response = backup_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") - - response = client.get_backup(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = backup_pb2.GetBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") - - with pytest.raises(CustomException): - client.get_backup(name) - - def test_update_backup(self): - # Setup Expected Response - database = "database1789464955" - name = "name3373707" - size_bytes = 1796325715 - expected_response = { - "database": database, - "name": name, - "size_bytes": size_bytes, - } - expected_response = backup_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - backup = {} - update_mask = {} - - response = client.update_backup(backup, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = backup_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - backup = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_backup(backup, update_mask) - - def 
test_delete_backup(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") - - client.delete_backup(name) - - assert len(channel.requests) == 1 - expected_request = backup_pb2.DeleteBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") - - with pytest.raises(CustomException): - client.delete_backup(name) - - def test_list_backups(self): - # Setup Expected Response - next_page_token = "" - backups_element = {} - backups = [backups_element] - expected_response = {"next_page_token": next_page_token, "backups": backups} - expected_response = backup_pb2.ListBackupsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_backups(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.backups[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = backup_pb2.ListBackupsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_backups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_backups(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_list_database_operations(self): - # Setup Expected Response - next_page_token = "" - operations_element = {} - operations = [operations_element] - expected_response = { - "next_page_token": next_page_token, - "operations": operations, - } - expected_response = spanner_database_admin_pb2.ListDatabaseOperationsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_database_operations(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.operations[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = 
spanner_database_admin_pb2.ListDatabaseOperationsRequest(
-            parent=parent
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_list_database_operations_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_database_v1.DatabaseAdminClient()
-
-        # Setup request
-        parent = client.instance_path("[PROJECT]", "[INSTANCE]")
-
-        paged_list_response = client.list_database_operations(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
-
-    def test_list_backup_operations(self):
-        # Setup Expected Response
-        next_page_token = ""
-        operations_element = {}
-        operations = [operations_element]
-        expected_response = {
-            "next_page_token": next_page_token,
-            "operations": operations,
-        }
-        expected_response = backup_pb2.ListBackupOperationsResponse(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_database_v1.DatabaseAdminClient()
-
-        # Setup Request
-        parent = client.instance_path("[PROJECT]", "[INSTANCE]")
-
-        paged_list_response = client.list_backup_operations(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
-
-        assert expected_response.operations[0] == resources[0]
-
-        assert len(channel.requests) == 1
-        expected_request = backup_pb2.ListBackupOperationsRequest(parent=parent)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_list_backup_operations_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_database_v1.DatabaseAdminClient()
-
-        # Setup request
-        parent = client.instance_path("[PROJECT]", "[INSTANCE]")
-
-        paged_list_response = client.list_backup_operations(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
diff --git a/tests/unit/gapic/v1/test_instance_admin_client_v1.py b/tests/unit/gapic/v1/test_instance_admin_client_v1.py
deleted file mode 100644
index 5104645a6f..0000000000
--- a/tests/unit/gapic/v1/test_instance_admin_client_v1.py
+++ /dev/null
@@ -1,538 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
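# Editorial note (not part of the original patch): the handwritten
# ChannelStub-based tests deleted in this patch drove the old GAPIC
# database-admin surface through positional arguments, e.g.
# client.create_database(parent, create_statement). The microgenerated v2
# client that replaces it takes proto-plus request objects instead. A minimal
# sketch of the new calling convention, assuming default credentials and
# hypothetical project/instance/database IDs:

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
request = spanner_admin_database_v1.GetDatabaseRequest(
    name="projects/my-project/instances/my-instance/databases/my-database"
)
database = client.get_database(request=request)  # returns a proto-plus Database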
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import spanner_admin_instance_v1 -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - config = "config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - - response = client.create_instance(parent, instance_id, instance) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - - response = client.create_instance(parent, instance_id, instance) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - config = 
"config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - instance = {} - field_mask = {} - - response = client.update_instance(instance, field_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.UpdateInstanceRequest( - instance=instance, field_mask=field_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - instance = {} - field_mask = {} - - response = client.update_instance(instance, field_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_instance_configs(self): - # Setup Expected Response - next_page_token = "" - instance_configs_element = {} - instance_configs = [instance_configs_element] - expected_response = { - "next_page_token": next_page_token, - "instance_configs": instance_configs, - } - expected_response = spanner_instance_admin_pb2.ListInstanceConfigsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instance_configs(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.instance_configs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instance_configs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instance_configs(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_instance_config(self): - # Setup Expected Response - name_2 = 
"name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = spanner_instance_admin_pb2.InstanceConfig( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_config_path("[PROJECT]", "[INSTANCE_CONFIG]") - - response = client.get_instance_config(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.GetInstanceConfigRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_config_path("[PROJECT]", "[INSTANCE_CONFIG]") - - with pytest.raises(CustomException): - client.get_instance_config(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "" - instances_element = {} - instances = [instances_element] - expected_response = {"next_page_token": next_page_token, "instances": instances} - expected_response = spanner_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instances(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.instances[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instances(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - config = "config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name_2, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - 
channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_instance_v1.InstanceAdminClient()
-
-        # Setup Request
-        resource = "resource-341064690"
-
-        response = client.get_iam_policy(resource)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_iam_policy_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_instance_v1.InstanceAdminClient()
-
-        # Setup request
-        resource = "resource-341064690"
-
-        with pytest.raises(CustomException):
-            client.get_iam_policy(resource)
-
-    def test_test_iam_permissions(self):
-        # Setup Expected Response
-        expected_response = {}
-        expected_response = iam_policy_pb2.TestIamPermissionsResponse(
-            **expected_response
-        )
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_instance_v1.InstanceAdminClient()
-
-        # Setup Request
-        resource = "resource-341064690"
-        permissions = []
-
-        response = client.test_iam_permissions(resource, permissions)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = iam_policy_pb2.TestIamPermissionsRequest(
-            resource=resource, permissions=permissions
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_test_iam_permissions_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = spanner_admin_instance_v1.InstanceAdminClient()
-
-        # Setup request
-        resource = "resource-341064690"
-        permissions = []
-
-        with pytest.raises(CustomException):
-            client.test_iam_permissions(resource, permissions)
diff --git a/tests/unit/gapic/v1/test_spanner_client_v1.py b/tests/unit/gapic/v1/test_spanner_client_v1.py
deleted file mode 100644
index a133902658..0000000000
--- a/tests/unit/gapic/v1/test_spanner_client_v1.py
+++ /dev/null
@@ -1,722 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
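# Editorial note (not part of the original patch): the instance-admin tests
# deleted above follow the same pattern, e.g. client.get_instance(name) with
# a positional path argument. Under v2 the equivalent call also takes a
# proto-plus request object; a sketch, again assuming default credentials and
# a hypothetical instance name:

from google.cloud import spanner_admin_instance_v1

client = spanner_admin_instance_v1.InstanceAdminClient()
request = spanner_admin_instance_v1.GetInstanceRequest(
    name="projects/my-project/instances/my-instance"
)
instance = client.get_instance(request=request)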
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud.spanner_v1.gapic import spanner_client as spanner_v1 -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestSpannerClient(object): - def test_create_session(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = spanner_pb2.Session(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.create_session(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.CreateSessionRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.create_session(database) - - def test_batch_create_sessions(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.BatchCreateSessionsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - session_count = 185691686 - - response = client.batch_create_sessions(database, session_count) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.BatchCreateSessionsRequest( - database=database, session_count=session_count - ) - 
actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_create_sessions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - session_count = 185691686 - - with pytest.raises(CustomException): - client.batch_create_sessions(database, session_count) - - def test_get_session(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = spanner_pb2.Session(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - response = client.get_session(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.GetSessionRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - with pytest.raises(CustomException): - client.get_session(name) - - def test_list_sessions(self): - # Setup Expected Response - next_page_token = "" - sessions_element = {} - sessions = [sessions_element] - expected_response = {"next_page_token": next_page_token, "sessions": sessions} - expected_response = spanner_pb2.ListSessionsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - paged_list_response = client.list_sessions(database) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.sessions[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ListSessionsRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_sessions_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - paged_list_response = client.list_sessions(database) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_session(self): - channel = ChannelStub() - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - client.delete_session(name) - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.DeleteSessionRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - with pytest.raises(CustomException): - client.delete_session(name) - - def test_execute_sql(self): - # Setup Expected Response - expected_response = {} - expected_response = result_set_pb2.ResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.execute_sql(session, sql) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteSqlRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_sql_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.execute_sql(session, sql) - - def test_execute_streaming_sql(self): - # Setup Expected Response - chunked_value = True - resume_token = b"103" - expected_response = { - "chunked_value": chunked_value, - "resume_token": resume_token, - } - expected_response = result_set_pb2.PartialResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.execute_streaming_sql(session, sql) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteSqlRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_streaming_sql_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.execute_streaming_sql(session, sql) - - def test_execute_batch_dml(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.ExecuteBatchDmlResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction = {} - statements = [] - seqno = 109325920 - - response = client.execute_batch_dml(session, transaction, statements, seqno) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteBatchDmlRequest( - session=session, transaction=transaction, statements=statements, seqno=seqno - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_batch_dml_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction = {} - statements = [] - seqno = 109325920 - - with pytest.raises(CustomException): - client.execute_batch_dml(session, transaction, statements, seqno) - - def test_read(self): - # Setup Expected Response - expected_response = {} - expected_response = result_set_pb2.ResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - response = client.read(session, table, columns, key_set) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ReadRequest( - session=session, table=table, columns=columns, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - with pytest.raises(CustomException): - client.read(session, table, columns, key_set) - - def test_streaming_read(self): - # Setup Expected Response - chunked_value = True - resume_token = b"103" - expected_response = 
{ - "chunked_value": chunked_value, - "resume_token": resume_token, - } - expected_response = result_set_pb2.PartialResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - response = client.streaming_read(session, table, columns, key_set) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ReadRequest( - session=session, table=table, columns=columns, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_streaming_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - with pytest.raises(CustomException): - client.streaming_read(session, table, columns, key_set) - - def test_begin_transaction(self): - # Setup Expected Response - id_ = b"27" - expected_response = {"id": id_} - expected_response = transaction_pb2.Transaction(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - options_ = {} - - response = client.begin_transaction(session, options_) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.BeginTransactionRequest( - session=session, options=options_ - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_begin_transaction_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - options_ = {} - - with pytest.raises(CustomException): - client.begin_transaction(session, options_) - - def test_commit(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.CommitResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - - response = client.commit(session) - assert 
expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.CommitRequest(session=session) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_commit_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - - with pytest.raises(CustomException): - client.commit(session) - - def test_rollback(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction_id = b"28" - - client.rollback(session, transaction_id) - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.RollbackRequest( - session=session, transaction_id=transaction_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction_id = b"28" - - with pytest.raises(CustomException): - client.rollback(session, transaction_id) - - def test_partition_query(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.PartitionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.partition_query(session, sql) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.PartitionQueryRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partition_query_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.partition_query(session, sql) - - def test_partition_read(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.PartitionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - key_set = {} - - response = client.partition_read(session, table, key_set) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.PartitionReadRequest( - session=session, table=table, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partition_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - key_set = {} - - with pytest.raises(CustomException): - client.partition_read(session, table, key_set) diff --git a/tests/unit/test__helpers.py b/tests/unit/test__helpers.py index c23188184c..5d6b015505 100644 --- a/tests/unit/test__helpers.py +++ b/tests/unit/test__helpers.py @@ -28,7 +28,7 @@ def test_base_none_and_merge_none(self): self.assertIsNone(result) def test_base_dict_and_merge_none(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest base = {"optimizer_version": "2"} merge = None @@ -37,7 +37,7 @@ def test_base_dict_and_merge_none(self): self.assertEqual(result, expected) def test_base_empty_and_merge_empty(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest base = ExecuteSqlRequest.QueryOptions() merge = ExecuteSqlRequest.QueryOptions() @@ -45,7 +45,7 @@ def test_base_empty_and_merge_empty(self): self.assertIsNone(result) def test_base_none_merge_object(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest base = None merge = ExecuteSqlRequest.QueryOptions(optimizer_version="3") @@ -53,7 +53,7 @@ def test_base_none_merge_object(self): self.assertEqual(result, merge) def test_base_none_merge_dict(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest base = None merge = {"optimizer_version": "3"} @@ -62,7 +62,7 @@ def test_base_none_merge_dict(self): self.assertEqual(result, expected) def test_base_object_merge_dict(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest base = ExecuteSqlRequest.QueryOptions(optimizer_version="1") merge = {"optimizer_version": "3"} @@ -146,6 +146,13 @@ def test_w_float(self): self.assertIsInstance(value_pb, Value) self.assertEqual(value_pb.number_value, 3.14159) + def test_w_float_str(self): + from google.protobuf.struct_pb2 import Value + + value_pb = self._callFUT(3.14159) + self.assertIsInstance(value_pb, Value) + self.assertEqual(value_pb.number_value, 3.14159) + def test_w_float_nan(self): from google.protobuf.struct_pb2 import Value @@ -290,6 +297,174 @@ def test_w_multiple_values(self): self.assertEqual(found.values[1].string_value, expected[1]) +class Test_parse_value(unittest.TestCase): + def _callFUT(self, *args, **kw): + 
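# Aside: the import hunks above are the central v2.0.0 migration pattern.
# Request, response, and type classes move out of the generated ``*_pb2``
# modules and are re-exported as proto-plus classes from the package root.
# A minimal sketch of the new spelling (paths as used throughout this diff):
#
#     # v1.x:
#     # from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
#     # v2.0.0:
#     from google.cloud.spanner_v1 import ExecuteSqlRequest
#
#     options = ExecuteSqlRequest.QueryOptions(optimizer_version="3")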
from google.cloud.spanner_v1._helpers import _parse_value + + return _parse_value(*args, **kw) + + def test_w_null(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.STRING) + value = expected_value = None + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_string(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.STRING) + value = expected_value = u"Value" + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_bytes(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.BYTES) + value = "Value" + expected_value = b"Value" + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_bool(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.BOOL) + value = expected_value = True + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_int(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.INT64) + value = "12345" + expected_value = 12345 + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_float(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.FLOAT64) + value = "3.14159" + expected_value = 3.14159 + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_date(self): + import datetime + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + value = "2020-09-22" + expected_value = datetime.date(2020, 9, 22) + field_type = Type(code=TypeCode.DATE) + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_timestamp_wo_nanos(self): + import pytz + from google.api_core import datetime_helpers + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.TIMESTAMP) + value = "2016-12-20T21:13:47.123456Z" + expected_value = datetime_helpers.DatetimeWithNanoseconds( + 2016, 12, 20, 21, 13, 47, microsecond=123456, tzinfo=pytz.UTC + ) + + parsed = self._callFUT(value, field_type) + self.assertIsInstance(parsed, datetime_helpers.DatetimeWithNanoseconds) + self.assertEqual(parsed, expected_value) + + def test_w_timestamp_w_nanos(self): + import pytz + from google.api_core import datetime_helpers + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.TIMESTAMP) + value = "2016-12-20T21:13:47.123456789Z" + expected_value = datetime_helpers.DatetimeWithNanoseconds( + 2016, 12, 20, 21, 13, 47, nanosecond=123456789, tzinfo=pytz.UTC + ) + + parsed = self._callFUT(value, field_type) + self.assertIsInstance(parsed, datetime_helpers.DatetimeWithNanoseconds) + self.assertEqual(parsed, expected_value) + + def test_w_array_empty(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) + ) + value = [] + + self.assertEqual(self._callFUT(value, field_type), []) + + def test_w_array_non_empty(self): + from google.cloud.spanner_v1 import 
Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) + ) + values = ["32", "19", "5"] + expected_values = [32, 19, 5] + + self.assertEqual(self._callFUT(values, field_type), expected_values) + + def test_w_struct(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import TypeCode + + struct_type_pb = StructType( + fields=[ + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), + ] + ) + field_type = Type(code=TypeCode.STRUCT, struct_type=struct_type_pb) + values = [u"phred", "32"] + expected_values = [u"phred", 32] + + self.assertEqual(self._callFUT(values, field_type), expected_values) + + def test_w_numeric(self): + import decimal + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.NUMERIC) + expected_value = decimal.Decimal("99999999999999999999999999999.999999999") + value = "99999999999999999999999999999.999999999" + + self.assertEqual(self._callFUT(value, field_type), expected_value) + + def test_w_unknown_type(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.TYPE_CODE_UNSPECIFIED) + value_pb = object() + + with self.assertRaises(ValueError): + self._callFUT(value_pb, field_type) + + class Test_parse_value_pb(unittest.TestCase): def _callFUT(self, *args, **kw): from google.cloud.spanner_v1._helpers import _parse_value_pb @@ -298,70 +473,89 @@ def _callFUT(self, *args, **kw): def test_w_null(self): from google.protobuf.struct_pb2 import Value, NULL_VALUE - from google.cloud.spanner_v1.proto.type_pb2 import Type, STRING + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - field_type = Type(code=STRING) + field_type = Type(code=TypeCode.STRING) value_pb = Value(null_value=NULL_VALUE) self.assertEqual(self._callFUT(value_pb, field_type), None) def test_w_string(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, STRING + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = u"Value" - field_type = Type(code=STRING) + field_type = Type(code=TypeCode.STRING) value_pb = Value(string_value=VALUE) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) def test_w_bytes(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, BYTES + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = b"Value" - field_type = Type(code=BYTES) + field_type = Type(code=TypeCode.BYTES) value_pb = Value(string_value=VALUE) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) def test_w_bool(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, BOOL + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = True - field_type = Type(code=BOOL) + field_type = Type(code=TypeCode.BOOL) value_pb = Value(bool_value=VALUE) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) def test_w_int(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, INT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 
import TypeCode VALUE = 12345 - field_type = Type(code=INT64) + field_type = Type(code=TypeCode.INT64) value_pb = Value(string_value=str(VALUE)) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) def test_w_float(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, FLOAT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = 3.14159 - field_type = Type(code=FLOAT64) + field_type = Type(code=TypeCode.FLOAT64) value_pb = Value(number_value=VALUE) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) + def test_w_float_str(self): + from google.protobuf.struct_pb2 import Value + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + VALUE = "3.14159" + field_type = Type(code=TypeCode.FLOAT64) + value_pb = Value(string_value=VALUE) + expected_value = 3.14159 + + self.assertEqual(self._callFUT(value_pb, field_type), expected_value) + def test_w_date(self): import datetime from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, DATE + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = datetime.date.today() - field_type = Type(code=DATE) + field_type = Type(code=TypeCode.DATE) value_pb = Value(string_value=VALUE.isoformat()) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) @@ -370,13 +564,13 @@ def test_w_timestamp_wo_nanos(self): import pytz from google.protobuf.struct_pb2 import Value from google.api_core import datetime_helpers - from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode value = datetime_helpers.DatetimeWithNanoseconds( 2016, 12, 20, 21, 13, 47, microsecond=123456, tzinfo=pytz.UTC ) - field_type = Type(code=TIMESTAMP) + field_type = Type(code=TypeCode.TIMESTAMP) value_pb = Value(string_value=datetime_helpers.to_rfc3339(value)) parsed = self._callFUT(value_pb, field_type) @@ -387,13 +581,13 @@ def test_w_timestamp_w_nanos(self): import pytz from google.protobuf.struct_pb2 import Value from google.api_core import datetime_helpers - from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode value = datetime_helpers.DatetimeWithNanoseconds( 2016, 12, 20, 21, 13, 47, nanosecond=123456789, tzinfo=pytz.UTC ) - field_type = Type(code=TIMESTAMP) + field_type = Type(code=TypeCode.TIMESTAMP) value_pb = Value(string_value=datetime_helpers.to_rfc3339(value)) parsed = self._callFUT(value_pb, field_type) @@ -401,19 +595,25 @@ def test_w_timestamp_w_nanos(self): self.assertEqual(parsed, value) def test_w_array_empty(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, ARRAY, INT64 + from google.protobuf.struct_pb2 import Value, ListValue + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - field_type = Type(code=ARRAY, array_element_type=Type(code=INT64)) - value_pb = Value() + field_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) + ) + value_pb = Value(list_value=ListValue(values=[])) self.assertEqual(self._callFUT(value_pb, field_type), []) def test_w_array_non_empty(self): from google.protobuf.struct_pb2 
import Value, ListValue - from google.cloud.spanner_v1.proto.type_pb2 import Type, ARRAY, INT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - field_type = Type(code=ARRAY, array_element_type=Type(code=INT64)) + field_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) + ) VALUES = [32, 19, 5] values_pb = ListValue( values=[Value(string_value=str(value)) for value in VALUES] @@ -424,18 +624,19 @@ def test_w_array_non_empty(self): def test_w_struct(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRUCT, STRING, INT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1._helpers import _make_list_value_pb VALUES = [u"phred", 32] struct_type_pb = StructType( fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), ] ) - field_type = Type(code=STRUCT, struct_type=struct_type_pb) + field_type = Type(code=TypeCode.STRUCT, struct_type=struct_type_pb) value_pb = Value(list_value=_make_list_value_pb(VALUES)) self.assertEqual(self._callFUT(value_pb, field_type), VALUES) @@ -443,25 +644,37 @@ def test_w_struct(self): def test_w_numeric(self): import decimal from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, NUMERIC + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode VALUE = decimal.Decimal("99999999999999999999999999999.999999999") - field_type = Type(code=NUMERIC) + field_type = Type(code=TypeCode.NUMERIC) value_pb = Value(string_value=str(VALUE)) self.assertEqual(self._callFUT(value_pb, field_type), VALUE) def test_w_unknown_type(self): from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type - from google.cloud.spanner_v1.proto.type_pb2 import TYPE_CODE_UNSPECIFIED + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - field_type = Type(code=TYPE_CODE_UNSPECIFIED) + field_type = Type(code=TypeCode.TYPE_CODE_UNSPECIFIED) value_pb = Value(string_value="Borked") with self.assertRaises(ValueError): self._callFUT(value_pb, field_type) + def test_w_empty_value(self): + from google.protobuf.struct_pb2 import Value + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + + field_type = Type(code=TypeCode.STRING) + value_pb = Value() + + with self.assertRaises(ValueError): + self._callFUT(value_pb, field_type) + class Test_parse_list_value_pbs(unittest.TestCase): def _callFUT(self, *args, **kw): @@ -470,28 +683,30 @@ def _callFUT(self, *args, **kw): return _parse_list_value_pbs(*args, **kw) def test_empty(self): - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import TypeCode struct_type_pb = StructType( fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + 
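# Aside: the struct hunks around this point show the companion rename in
# proto-plus. Protobuf fields whose names shadow Python builtins grow a
# trailing underscore (``type`` becomes ``type_``), and bare enum constants
# such as ``INT64`` become members of the ``TypeCode`` enum. A minimal sketch,
# using the same classes this diff imports:
#
#     from google.cloud.spanner_v1 import StructType, Type, TypeCode
#
#     # v1.x: StructType.Field(name="age", type=Type(code=INT64))
#     age_field = StructType.Field(name="age", type_=Type(code=TypeCode.INT64))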
StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), ] ) self.assertEqual(self._callFUT(rows=[], row_type=struct_type_pb), []) def test_non_empty(self): - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1._helpers import _make_list_value_pbs VALUES = [[u"phred", 32], [u"bharney", 31]] struct_type_pb = StructType( fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), ] ) values_pbs = _make_list_value_pbs(VALUES) diff --git a/tests/unit/test__opentelemetry_tracing.py b/tests/unit/test__opentelemetry_tracing.py index 8e26468dfe..cfd3241718 100644 --- a/tests/unit/test__opentelemetry_tracing.py +++ b/tests/unit/test__opentelemetry_tracing.py @@ -57,8 +57,8 @@ def test_trace_call(self): expected_attributes = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", - "net.host.name": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", + "net.host.name": "spanner.googleapis.com", } expected_attributes.update(extra_attributes) @@ -82,8 +82,8 @@ def test_trace_error(self): expected_attributes = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", - "net.host.name": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", + "net.host.name": "spanner.googleapis.com", } expected_attributes.update(extra_attributes) @@ -99,7 +99,7 @@ def test_trace_error(self): self.assertEqual(len(span_list), 1) span = span_list[0] self.assertEqual(span.kind, trace_api.SpanKind.CLIENT) - self.assertEqual(span.attributes, expected_attributes) + self.assertEqual(dict(span.attributes), expected_attributes) self.assertEqual(span.name, "CloudSpanner.Test") self.assertEqual( span.status.canonical_code, StatusCanonicalCode.INVALID_ARGUMENT @@ -121,9 +121,30 @@ def test_trace_grpc_error(self): ) as span: from google.api_core.exceptions import DataLoss - raise _make_rpc_error(DataLoss) + raise DataLoss("error") span_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(span_list), 1) span = span_list[0] self.assertEqual(span.status.canonical_code, StatusCanonicalCode.DATA_LOSS) + + def test_trace_codeless_error(self): + extra_attributes = {"db.instance": "database_name"} + + expected_attributes = { + "db.type": "spanner", + "db.url": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com:443", + } + expected_attributes.update(extra_attributes) + + with self.assertRaises(GoogleAPICallError): + with _opentelemetry_tracing.trace_call( + "CloudSpanner.Test", _make_session(), extra_attributes + ) as span: + raise GoogleAPICallError("error") + + span_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(span_list), 1) + span = span_list[0] + self.assertEqual(span.status.canonical_code, StatusCanonicalCode.UNKNOWN) diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 0762305220..748c460291 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -47,7 +47,7 @@ def _get_target_class(self): @staticmethod def _make_database_admin_api(): - from google.cloud.spanner_v1.client import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 
import DatabaseAdminClient return mock.create_autospec(DatabaseAdminClient, instance=True) @@ -76,46 +76,46 @@ def test_ctor_non_defaults(self): self.assertIs(backup._expire_time, timestamp) def test_from_pb_project_mismatch(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = Backup(name=self.BACKUP_NAME) backup_class = self._get_target_class() with self.assertRaises(ValueError): backup_class.from_pb(backup_pb, instance) def test_from_pb_instance_mismatch(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup ALT_INSTANCE = "/projects/%s/instances/ALT-INSTANCE" % (self.PROJECT_ID,) client = _Client() instance = _Instance(ALT_INSTANCE, client) - backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = Backup(name=self.BACKUP_NAME) backup_class = self._get_target_class() with self.assertRaises(ValueError): backup_class.from_pb(backup_pb, instance) def test_from_pb_invalid_name(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = backup_pb2.Backup(name="invalid_format") + backup_pb = Backup(name="invalid_format") backup_class = self._get_target_class() with self.assertRaises(ValueError): backup_class.from_pb(backup_pb, instance) def test_from_pb_success(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = Backup(name=self.BACKUP_NAME) backup_class = self._get_target_class() backup = backup_class.from_pb(backup_pb, instance) @@ -157,11 +157,11 @@ def test_size_bytes_property(self): self.assertEqual(backup.size_bytes, expected) def test_state_property(self): - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Backup instance = _Instance(self.INSTANCE_NAME) backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._state = enums.Backup.State.READY + expected = backup._state = Backup.State.READY self.assertEqual(backup.state, expected) def test_referencing_databases_property(self): @@ -173,6 +173,7 @@ def test_referencing_databases_property(self): def test_create_grpc_error(self): from google.api_core.exceptions import GoogleAPICallError from google.api_core.exceptions import Unknown + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -184,12 +185,7 @@ def test_create_grpc_error(self): self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp ) - from google.cloud._helpers import _datetime_to_pb_timestamp - - backup_pb = { - "database": self.DATABASE_NAME, - "expire_time": _datetime_to_pb_timestamp(timestamp), - } + backup_pb = Backup(database=self.DATABASE_NAME, expire_time=timestamp,) with self.assertRaises(GoogleAPICallError): backup.create() @@ -203,6 +199,7 @@ def test_create_grpc_error(self): def test_create_already_exists(self): from google.cloud.exceptions import 
Conflict + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -214,12 +211,7 @@ def test_create_already_exists(self): self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp ) - from google.cloud._helpers import _datetime_to_pb_timestamp - - backup_pb = { - "database": self.DATABASE_NAME, - "expire_time": _datetime_to_pb_timestamp(timestamp), - } + backup_pb = Backup(database=self.DATABASE_NAME, expire_time=timestamp,) with self.assertRaises(Conflict): backup.create() @@ -233,6 +225,7 @@ def test_create_already_exists(self): def test_create_instance_not_found(self): from google.cloud.exceptions import NotFound + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -244,12 +237,7 @@ def test_create_instance_not_found(self): self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp ) - from google.cloud._helpers import _datetime_to_pb_timestamp - - backup_pb = { - "database": self.DATABASE_NAME, - "expire_time": _datetime_to_pb_timestamp(timestamp), - } + backup_pb = Backup(database=self.DATABASE_NAME, expire_time=timestamp,) with self.assertRaises(NotFound): backup.create() @@ -277,6 +265,8 @@ def test_create_database_not_set(self): backup.create() def test_create_success(self): + from google.cloud.spanner_admin_database_v1 import Backup + op_future = object() client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -288,12 +278,7 @@ def test_create_success(self): self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp ) - from google.cloud._helpers import _datetime_to_pb_timestamp - - backup_pb = { - "database": self.DATABASE_NAME, - "expire_time": _datetime_to_pb_timestamp(timestamp), - } + backup_pb = Backup(database=self.DATABASE_NAME, expire_time=timestamp,) future = backup.create() self.assertIs(future, op_future) @@ -319,7 +304,8 @@ def test_exists_grpc_error(self): backup.exists() api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_exists_not_found(self): @@ -335,14 +321,15 @@ def test_exists_not_found(self): self.assertFalse(backup.exists()) api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_exists_success(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() - backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = Backup(name=self.BACKUP_NAME) api = client.database_admin_api = self._make_database_admin_api() api.get_backup.return_value = backup_pb @@ -352,7 +339,8 @@ def test_exists_success(self): self.assertTrue(backup.exists()) api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_delete_grpc_error(self): @@ -368,7 +356,8 @@ def test_delete_grpc_error(self): backup.delete() api.delete_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + 
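# Aside: each assertion in these backup tests encodes the same
# calling-convention change: the regenerated v2.0.0 clients accept request
# fields as keyword arguments rather than positionally. A hedged sketch of
# the call shape these tests now expect, with ``api`` and ``backup`` standing
# in for the fixtures used here:
#
#     # v1.x:
#     # api.get_backup(backup.name, metadata=[...])
#     # v2.0.0:
#     api.get_backup(
#         name=backup.name,
#         metadata=[("google-cloud-resource-prefix", backup.name)],
#     )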
metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_delete_not_found(self): @@ -384,7 +373,8 @@ def test_delete_not_found(self): backup.delete() api.delete_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_delete_success(self): @@ -399,7 +389,8 @@ def test_delete_success(self): backup.delete() api.delete_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_reload_grpc_error(self): @@ -415,7 +406,8 @@ def test_reload_grpc_error(self): backup.reload() api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_reload_not_found(self): @@ -431,22 +423,21 @@ def test_reload_not_found(self): backup.reload() api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_reload_success(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 - from google.cloud.spanner_admin_database_v1.gapic import enums - from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.spanner_admin_database_v1 import Backup timestamp = self._make_timestamp() client = _Client() - backup_pb = backup_pb2.Backup( + backup_pb = Backup( name=self.BACKUP_NAME, database=self.DATABASE_NAME, - expire_time=_datetime_to_pb_timestamp(timestamp), - create_time=_datetime_to_pb_timestamp(timestamp), + expire_time=timestamp, + create_time=timestamp, size_bytes=10, state=1, referencing_databases=[], @@ -462,16 +453,17 @@ def test_reload_success(self): self.assertEqual(backup.expire_time, timestamp) self.assertEqual(backup.create_time, timestamp) self.assertEqual(backup.size_bytes, 10) - self.assertEqual(backup.state, enums.Backup.State.CREATING) + self.assertEqual(backup.state, Backup.State.CREATING) self.assertEqual(backup.referencing_databases, []) api.get_backup.assert_called_once_with( - self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + name=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_update_expire_time_grpc_error(self): from google.api_core.exceptions import Unknown - from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -483,20 +475,17 @@ def test_update_expire_time_grpc_error(self): with self.assertRaises(Unknown): backup.update_expire_time(expire_time) - backup_update = { - "name": self.BACKUP_NAME, - "expire_time": _datetime_to_pb_timestamp(expire_time), - } + backup_update = Backup(name=self.BACKUP_NAME, expire_time=expire_time,) update_mask = {"paths": ["expire_time"]} api.update_backup.assert_called_once_with( - backup_update, - update_mask, + backup=backup_update, + update_mask=update_mask, metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_update_expire_time_not_found(self): from google.api_core.exceptions import NotFound - from google.cloud._helpers import _datetime_to_pb_timestamp + from 
google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -508,75 +497,46 @@ def test_update_expire_time_not_found(self): with self.assertRaises(NotFound): backup.update_expire_time(expire_time) - backup_update = { - "name": self.BACKUP_NAME, - "expire_time": _datetime_to_pb_timestamp(expire_time), - } + backup_update = Backup(name=self.BACKUP_NAME, expire_time=expire_time,) update_mask = {"paths": ["expire_time"]} api.update_backup.assert_called_once_with( - backup_update, - update_mask, + backup=backup_update, + update_mask=update_mask, metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_update_expire_time_success(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() api = client.database_admin_api = self._make_database_admin_api() - api.update_backup.return_type = backup_pb2.Backup(name=self.BACKUP_NAME) + api.update_backup.return_value = Backup(name=self.BACKUP_NAME) instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance) expire_time = self._make_timestamp() backup.update_expire_time(expire_time) - backup_update = { - "name": self.BACKUP_NAME, - "expire_time": _datetime_to_pb_timestamp(expire_time), - } + backup_update = Backup(name=self.BACKUP_NAME, expire_time=expire_time,) update_mask = {"paths": ["expire_time"]} api.update_backup.assert_called_once_with( - backup_update, - update_mask, + backup=backup_update, + update_mask=update_mask, metadata=[("google-cloud-resource-prefix", backup.name)], ) def test_is_ready(self): - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Backup client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance) - backup._state = enums.Backup.State.READY + backup._state = Backup.State.READY self.assertTrue(backup.is_ready()) - backup._state = enums.Backup.State.CREATING + backup._state = Backup.State.CREATING self.assertFalse(backup.is_ready()) -class TestBackupInfo(_BaseTest): - def test_from_pb(self): - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 - from google.cloud.spanner_v1.backup import BackupInfo - from google.cloud._helpers import _datetime_to_pb_timestamp - - backup_name = "backup_name" - timestamp = self._make_timestamp() - database_name = "database_name" - - pb = backup_pb2.BackupInfo( - backup=backup_name, - create_time=_datetime_to_pb_timestamp(timestamp), - source_database=database_name, - ) - backup_info = BackupInfo.from_pb(pb) - - self.assertEqual(backup_info.backup, backup_name) - self.assertEqual(backup_info.create_time, timestamp) - self.assertEqual(backup_info.source_database, database_name) - - class _Client(object): def __init__(self, project=TestBackup.PROJECT_ID): self.project = project diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index 9b831f4906..7c87f8a82a 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -24,9 +24,9 @@ ] BASE_ATTRIBUTES = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", "db.instance": "testing", - "net.host.name": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com", } @@ -51,18 +51,13 @@ def _getTargetClass(self): return _BatchBase def 
_compare_values(self, result, source): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - for found, expected in zip(result, source): - self.assertIsInstance(found, ListValue) - self.assertEqual(len(found.values), len(expected)) - for found_cell, expected_cell in zip(found.values, expected): - self.assertIsInstance(found_cell, Value) + self.assertEqual(len(found), len(expected)) + for found_cell, expected_cell in zip(found, expected): if isinstance(expected_cell, int): - self.assertEqual(int(found_cell.string_value), expected_cell) + self.assertEqual(int(found_cell), expected_cell) else: - self.assertEqual(found_cell.string_value, expected_cell) + self.assertEqual(found_cell, expected_cell) def test_ctor(self): session = _Session() @@ -77,7 +72,7 @@ def test__check_state_virtual(self): base._check_state() def test_insert(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation + from google.cloud.spanner_v1 import Mutation session = _Session() base = self._make_one(session) @@ -94,7 +89,7 @@ def test_insert(self): self._compare_values(write.values, VALUES) def test_update(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation + from google.cloud.spanner_v1 import Mutation session = _Session() base = self._make_one(session) @@ -111,7 +106,7 @@ def test_update(self): self._compare_values(write.values, VALUES) def test_insert_or_update(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation + from google.cloud.spanner_v1 import Mutation session = _Session() base = self._make_one(session) @@ -128,7 +123,7 @@ def test_insert_or_update(self): self._compare_values(write.values, VALUES) def test_replace(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation + from google.cloud.spanner_v1 import Mutation session = _Session() base = self._make_one(session) @@ -145,7 +140,7 @@ def test_replace(self): self._compare_values(write.values, VALUES) def test_delete(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation + from google.cloud.spanner_v1 import Mutation from google.cloud.spanner_v1.keyset import KeySet keys = [[0], [1], [2]] @@ -165,9 +160,7 @@ def test_delete(self): self.assertEqual(len(key_set_pb.ranges), 0) self.assertEqual(len(key_set_pb.keys), len(keys)) for found, expected in zip(key_set_pb.keys, keys): - self.assertEqual( - [int(value.string_value) for value in found.values], expected - ) + self.assertEqual([int(value) for value in found], expected) class TestBatch(_BaseTest, OpenTelemetryBase): @@ -220,8 +213,8 @@ def test_commit_grpc_error(self): def test_commit_ok(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import TransactionOptions from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_pb_timestamp @@ -243,7 +236,7 @@ def test_commit_ok(self): self.assertEqual(session, self.SESSION_NAME) self.assertEqual(mutations, batch._mutations) self.assertIsInstance(single_use_txn, TransactionOptions) - self.assertTrue(single_use_txn.HasField("read_write")) + self.assertTrue(type(single_use_txn).pb(single_use_txn).HasField("read_write")) self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) self.assertSpanAttributes( @@ -269,8 +262,8 @@ def test_context_mgr_already_committed(self): def 
test_context_mgr_success(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import TransactionOptions from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_pb_timestamp @@ -291,7 +284,7 @@ def test_context_mgr_success(self): self.assertEqual(session, self.SESSION_NAME) self.assertEqual(mutations, batch._mutations) self.assertIsInstance(single_use_txn, TransactionOptions) - self.assertTrue(single_use_txn.HasField("read_write")) + self.assertTrue(type(single_use_txn).pb(single_use_txn).HasField("read_write")) self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) self.assertSpanAttributes( @@ -300,7 +293,7 @@ def test_context_mgr_success(self): def test_context_mgr_failure(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse + from google.cloud.spanner_v1 import CommitResponse from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_pb_timestamp diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 4eab87ceb5..a3001e61ae 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -38,7 +38,6 @@ class TestClient(unittest.TestCase): DISPLAY_NAME = "display-name" NODE_COUNT = 5 TIMEOUT_SECONDS = 80 - USER_AGENT = "you-sir-age-int" def _get_target_class(self): from google.cloud import spanner @@ -54,7 +53,6 @@ def _constructor_test_helper( creds, expected_creds=None, client_info=None, - user_agent=None, client_options=None, query_options=None, expected_query_options=None, @@ -80,7 +78,6 @@ def _constructor_test_helper( client = self._make_one( project=self.PROJECT, credentials=creds, - user_agent=user_agent, query_options=query_options, **kwargs ) @@ -94,7 +91,6 @@ def _constructor_test_helper( self.assertEqual(client.project, self.PROJECT) self.assertIs(client._client_info, expected_client_info) - self.assertEqual(client.user_agent, user_agent) if expected_client_options is not None: self.assertIsInstance( client._client_options, google.api_core.client_options.ClientOptions @@ -127,20 +123,6 @@ def test_constructor_default_scopes(self): creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds) - @mock.patch("warnings.warn") - def test_constructor_custom_user_agent_and_timeout(self, mock_warn): - from google.cloud.spanner_v1 import client as MUT - - CUSTOM_USER_AGENT = "custom-application" - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - self._constructor_test_helper( - expected_scopes, creds, user_agent=CUSTOM_USER_AGENT - ) - mock_warn.assert_called_once_with( - MUT._USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2 - ) - def test_constructor_custom_client_info(self): from google.cloud.spanner_v1 import client as MUT @@ -189,7 +171,7 @@ def test_constructor_custom_client_options_dict(self): ) def test_constructor_custom_query_options_client_config(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import client as MUT expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) @@ -205,7 +187,7 @@ def test_constructor_custom_query_options_client_config(self): @mock.patch("google.cloud.spanner_v1.client._get_spanner_optimizer_version") def 
test_constructor_custom_query_options_env_config(self, mock_ver): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import client as MUT expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) @@ -440,83 +422,83 @@ def test_project_name_property(self): self.assertEqual(client.project_name, project_name) def test_list_instance_configs(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, + from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient + from google.cloud.spanner_admin_instance_v1 import ( + InstanceConfig as InstanceConfigPB, ) - from google.cloud.spanner_v1.client import InstanceConfig + from google.cloud.spanner_admin_instance_v1 import ListInstanceConfigsRequest + from google.cloud.spanner_admin_instance_v1 import ListInstanceConfigsResponse - api = instance_admin_client.InstanceAdminClient(mock.Mock()) + api = InstanceAdminClient(credentials=mock.Mock()) credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) client._instance_admin_api = api - instance_config_pbs = spanner_instance_admin_pb2.ListInstanceConfigsResponse( + instance_config_pbs = ListInstanceConfigsResponse( instance_configs=[ - spanner_instance_admin_pb2.InstanceConfig( + InstanceConfigPB( name=self.CONFIGURATION_NAME, display_name=self.DISPLAY_NAME ) ] ) - lic_api = api._inner_api_calls["list_instance_configs"] = mock.Mock( - return_value=instance_config_pbs - ) + lic_api = api._transport._wrapped_methods[ + api._transport.list_instance_configs + ] = mock.Mock(return_value=instance_config_pbs) response = client.list_instance_configs() instance_configs = list(response) instance_config = instance_configs[0] - self.assertIsInstance(instance_config, InstanceConfig) + self.assertIsInstance(instance_config, InstanceConfigPB) self.assertEqual(instance_config.name, self.CONFIGURATION_NAME) self.assertEqual(instance_config.display_name, self.DISPLAY_NAME) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", client.project_name), ("x-goog-request-params", "parent={}".format(client.project_name)), - ] + ) lic_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstanceConfigsRequest(parent=self.PATH), + ListInstanceConfigsRequest(parent=self.PATH), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_instance_configs_w_options(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, + from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient + from google.cloud.spanner_admin_instance_v1 import ( + InstanceConfig as InstanceConfigPB, ) + from google.cloud.spanner_admin_instance_v1 import ListInstanceConfigsRequest + from google.cloud.spanner_admin_instance_v1 import ListInstanceConfigsResponse - api = instance_admin_client.InstanceAdminClient(mock.Mock()) + api = InstanceAdminClient(credentials=mock.Mock()) credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) client._instance_admin_api = api - instance_config_pbs = spanner_instance_admin_pb2.ListInstanceConfigsResponse( + instance_config_pbs = ListInstanceConfigsResponse( instance_configs=[ - spanner_instance_admin_pb2.InstanceConfig( + 
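# Aside: these client tests change two things at once. Stubbing moves off the
# removed ``_inner_api_calls`` dict onto the transport's wrapped methods, and
# calls are asserted against explicit request objects. A minimal sketch of the
# new stubbing pattern (names as used in this diff; credentials are mocked):
#
#     import mock
#     from google.cloud.spanner_admin_instance_v1 import (
#         InstanceAdminClient,
#         ListInstanceConfigsRequest,
#         ListInstanceConfigsResponse,
#     )
#
#     api = InstanceAdminClient(credentials=mock.Mock())
#     lic_api = api._transport._wrapped_methods[
#         api._transport.list_instance_configs
#     ] = mock.Mock(return_value=ListInstanceConfigsResponse(instance_configs=[]))
#     # the assertion then compares against a request object:
#     expected = ListInstanceConfigsRequest(parent="projects/[PROJECT]")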
InstanceConfigPB( name=self.CONFIGURATION_NAME, display_name=self.DISPLAY_NAME ) ] ) - lic_api = api._inner_api_calls["list_instance_configs"] = mock.Mock( - return_value=instance_config_pbs - ) + lic_api = api._transport._wrapped_methods[ + api._transport.list_instance_configs + ] = mock.Mock(return_value=instance_config_pbs) - token = "token" page_size = 42 - list(client.list_instance_configs(page_token=token, page_size=42)) + list(client.list_instance_configs(page_size=42)) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", client.project_name), ("x-goog-request-params", "parent={}".format(client.project_name)), - ] + ) lic_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=self.PATH, page_size=page_size, page_token=token - ), + ListInstanceConfigsRequest(parent=self.PATH, page_size=page_size), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, @@ -559,20 +541,19 @@ def test_instance_factory_explicit(self): self.assertIs(instance._client, client) def test_list_instances(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) - from google.cloud.spanner_v1.client import Instance + from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient + from google.cloud.spanner_admin_instance_v1 import Instance as InstancePB + from google.cloud.spanner_admin_instance_v1 import ListInstancesRequest + from google.cloud.spanner_admin_instance_v1 import ListInstancesResponse - api = instance_admin_client.InstanceAdminClient(mock.Mock()) + api = InstanceAdminClient(credentials=mock.Mock()) credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) client._instance_admin_api = api - instance_pbs = spanner_instance_admin_pb2.ListInstancesResponse( + instance_pbs = ListInstancesResponse( instances=[ - spanner_instance_admin_pb2.Instance( + InstancePB( name=self.INSTANCE_NAME, config=self.CONFIGURATION_NAME, display_name=self.DISPLAY_NAME, @@ -581,61 +562,57 @@ def test_list_instances(self): ] ) - li_api = api._inner_api_calls["list_instances"] = mock.Mock( - return_value=instance_pbs - ) + li_api = api._transport._wrapped_methods[ + api._transport.list_instances + ] = mock.Mock(return_value=instance_pbs) response = client.list_instances() instances = list(response) instance = instances[0] - self.assertIsInstance(instance, Instance) + self.assertIsInstance(instance, InstancePB) self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.configuration_name, self.CONFIGURATION_NAME) + self.assertEqual(instance.config, self.CONFIGURATION_NAME) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.node_count, self.NODE_COUNT) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", client.project_name), ("x-goog-request-params", "parent={}".format(client.project_name)), - ] + ) li_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstancesRequest(parent=self.PATH), + ListInstancesRequest(parent=self.PATH), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_instances_w_options(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient + from 
google.cloud.spanner_admin_instance_v1 import ListInstancesRequest + from google.cloud.spanner_admin_instance_v1 import ListInstancesResponse - api = instance_admin_client.InstanceAdminClient(mock.Mock()) + api = InstanceAdminClient(credentials=mock.Mock()) credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) client._instance_admin_api = api - instance_pbs = spanner_instance_admin_pb2.ListInstancesResponse(instances=[]) + instance_pbs = ListInstancesResponse(instances=[]) - li_api = api._inner_api_calls["list_instances"] = mock.Mock( - return_value=instance_pbs - ) + li_api = api._transport._wrapped_methods[ + api._transport.list_instances + ] = mock.Mock(return_value=instance_pbs) - token = "token" - filter = "name:instance" page_size = 42 - list(client.list_instances(filter_=filter, page_token=token, page_size=42)) + filter_ = "name:instance" + list(client.list_instances(filter_=filter_, page_size=42)) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", client.project_name), ("x-goog-request-params", "parent={}".format(client.project_name)), - ] + ) li_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstancesRequest( - parent=self.PATH, filter=filter, page_size=page_size, page_token=token - ), + ListInstancesRequest(parent=self.PATH, filter=filter_, page_size=page_size), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index d8a581f87b..175c269d50 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -17,6 +17,7 @@ import mock +from google.cloud.spanner_v1.param_types import INT64 DML_WO_PARAM = """ DELETE FROM citizens @@ -27,7 +28,7 @@ VALUES ("Phred", "Phlyntstone", @age) """ PARAMS = {"age": 30} -PARAM_TYPES = {"age": "INT64"} +PARAM_TYPES = {"age": INT64} MODE = 2 # PROFILE @@ -88,11 +89,9 @@ def _make_database_admin_api(): @staticmethod def _make_spanner_api(): - import google.cloud.spanner_v1.gapic.spanner_client + from google.cloud.spanner_v1 import SpannerClient - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) + return mock.create_autospec(SpannerClient, instance=True) def test_ctor_defaults(self): from google.cloud.spanner_v1.pool import BurstyPool @@ -147,53 +146,45 @@ def test_ctor_w_ddl_statements_ok(self): self.assertEqual(list(database.ddl_statements), DDL_STATEMENTS) def test_from_pb_bad_database_name(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import Database database_name = "INCORRECT_FORMAT" - database_pb = admin_v1_pb2.Database(name=database_name) + database_pb = Database(name=database_name) klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(database_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import Database ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) + database_pb = Database(name=self.DATABASE_NAME) klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(database_pb, instance) def test_from_pb_instance_mistmatch(self): - from 
google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import Database ALT_INSTANCE = "/projects/%s/instances/ALT-INSTANCE" % (self.PROJECT_ID,) client = _Client() instance = _Instance(ALT_INSTANCE, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) + database_pb = Database(name=self.DATABASE_NAME) klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(database_pb, instance) def test_from_pb_success_w_explicit_pool(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import Database client = _Client() instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) + database_pb = Database(name=self.DATABASE_NAME) klass = self._get_target_class() pool = _Pool() @@ -205,16 +196,14 @@ def test_from_pb_success_w_explicit_pool(self): self.assertIs(database._pool, pool) def test_from_pb_success_w_hyphen_w_default_pool(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import Database from google.cloud.spanner_v1.pool import BurstyPool DATABASE_ID_HYPHEN = "database-id" DATABASE_NAME_HYPHEN = self.INSTANCE_NAME + "/databases/" + DATABASE_ID_HYPHEN client = _Client() instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=DATABASE_NAME_HYPHEN) + database_pb = Database(name=DATABASE_NAME_HYPHEN) klass = self._get_target_class() database = klass.from_pb(database_pb, instance) @@ -241,16 +230,16 @@ def test_create_time_property(self): self.assertEqual(database.create_time, expected_create_time) def test_state_property(self): - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Database instance = _Instance(self.INSTANCE_NAME) pool = _Pool() database = self._make_one(self.DATABASE_ID, instance, pool=pool) - expected_state = database._state = enums.Database.State.READY + expected_state = database._state = Database.State.READY self.assertEqual(database.state, expected_state) def test_restore_info(self): - from google.cloud.spanner_v1.database import RestoreInfo + from google.cloud.spanner_admin_database_v1 import RestoreInfo instance = _Instance(self.INSTANCE_NAME) pool = _Pool() @@ -380,6 +369,7 @@ def test___ne__(self): def test_create_grpc_error(self): from google.api_core.exceptions import GoogleAPICallError from google.api_core.exceptions import Unknown + from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -392,15 +382,20 @@ def test_create_grpc_error(self): with self.assertRaises(GoogleAPICallError): database.create() - api.create_database.assert_called_once_with( + expected_request = CreateDatabaseRequest( parent=self.INSTANCE_NAME, create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), extra_statements=[], + ) + + api.create_database.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_create_already_exists(self): from google.cloud.exceptions import Conflict + from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest DATABASE_ID_HYPHEN = "database-id" client = _Client() @@ -413,15 +408,20 
@@ def test_create_already_exists(self): with self.assertRaises(Conflict): database.create() - api.create_database.assert_called_once_with( + expected_request = CreateDatabaseRequest( parent=self.INSTANCE_NAME, create_statement="CREATE DATABASE `{}`".format(DATABASE_ID_HYPHEN), extra_statements=[], + ) + + api.create_database.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_create_instance_not_found(self): from google.cloud.exceptions import NotFound + from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -433,15 +433,20 @@ def test_create_instance_not_found(self): with self.assertRaises(NotFound): database.create() - api.create_database.assert_called_once_with( + expected_request = CreateDatabaseRequest( parent=self.INSTANCE_NAME, create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), extra_statements=[], + ) + + api.create_database.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_create_success(self): from tests._fixtures import DDL_STATEMENTS + from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest op_future = object() client = _Client() @@ -457,10 +462,14 @@ def test_create_success(self): self.assertIs(future, op_future) - api.create_database.assert_called_once_with( + expected_request = CreateDatabaseRequest( parent=self.INSTANCE_NAME, create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), extra_statements=DDL_STATEMENTS, + ) + + api.create_database.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -478,7 +487,7 @@ def test_exists_grpc_error(self): database.exists() api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -495,18 +504,16 @@ def test_exists_not_found(self): self.assertFalse(database.exists()) api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_exists_success(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_database_v1 import GetDatabaseDdlResponse from tests._fixtures import DDL_STATEMENTS client = _Client() - ddl_pb = admin_v1_pb2.GetDatabaseDdlResponse(statements=DDL_STATEMENTS) + ddl_pb = GetDatabaseDdlResponse(statements=DDL_STATEMENTS) api = client.database_admin_api = self._make_database_admin_api() api.get_database_ddl.return_value = ddl_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -516,7 +523,7 @@ def test_exists_success(self): self.assertTrue(database.exists()) api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -534,7 +541,7 @@ def test_reload_grpc_error(self): database.reload() api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -552,26 +559,25 @@ def test_reload_not_found(self): database.reload() api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", 
database.name)], ) def test_reload_success(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Database + from google.cloud.spanner_admin_database_v1 import GetDatabaseDdlResponse + from google.cloud.spanner_admin_database_v1 import RestoreInfo from google.cloud._helpers import _datetime_to_pb_timestamp from tests._fixtures import DDL_STATEMENTS timestamp = self._make_timestamp() - restore_info = admin_v1_pb2.RestoreInfo() + restore_info = RestoreInfo() client = _Client() - ddl_pb = admin_v1_pb2.GetDatabaseDdlResponse(statements=DDL_STATEMENTS) + ddl_pb = GetDatabaseDdlResponse(statements=DDL_STATEMENTS) api = client.database_admin_api = self._make_database_admin_api() api.get_database_ddl.return_value = ddl_pb - db_pb = admin_v1_pb2.Database( + db_pb = Database( state=2, create_time=_datetime_to_pb_timestamp(timestamp), restore_info=restore_info, @@ -582,23 +588,24 @@ def test_reload_success(self): database = self._make_one(self.DATABASE_ID, instance, pool=pool) database.reload() - self.assertEqual(database._state, enums.Database.State.READY) + self.assertEqual(database._state, Database.State.READY) self.assertEqual(database._create_time, timestamp) self.assertEqual(database._restore_info, restore_info) self.assertEqual(database._ddl_statements, tuple(DDL_STATEMENTS)) api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) api.get_database.assert_called_once_with( - self.DATABASE_NAME, + name=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_update_ddl_grpc_error(self): from google.api_core.exceptions import Unknown from tests._fixtures import DDL_STATEMENTS + from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -610,16 +617,19 @@ def test_update_ddl_grpc_error(self): with self.assertRaises(Unknown): database.update_ddl(DDL_STATEMENTS) + expected_request = UpdateDatabaseDdlRequest( + database=self.DATABASE_NAME, statements=DDL_STATEMENTS, operation_id="", + ) + api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_update_ddl_not_found(self): from google.cloud.exceptions import NotFound from tests._fixtures import DDL_STATEMENTS + from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest client = _Client() api = client.database_admin_api = self._make_database_admin_api() @@ -631,15 +641,18 @@ def test_update_ddl_not_found(self): with self.assertRaises(NotFound): database.update_ddl(DDL_STATEMENTS) + expected_request = UpdateDatabaseDdlRequest( + database=self.DATABASE_NAME, statements=DDL_STATEMENTS, operation_id="", + ) + api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_update_ddl(self): from tests._fixtures import DDL_STATEMENTS + from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest op_future = object() client = _Client() @@ -653,15 +666,18 @@ def test_update_ddl(self): self.assertIs(future, op_future) + expected_request = 
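[Note on the `reload` hunks] They also stop importing a standalone `enums` module: in v2 each enum hangs off its owning message class. A sketch (the hunk's `state=2` corresponds to `READY`):

    from google.cloud.spanner_admin_database_v1 import Database

    db_pb = Database(state=Database.State.READY)
    assert db_pb.state == Database.State.READY  # READY == 2 in the proto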
UpdateDatabaseDdlRequest( + database=self.DATABASE_NAME, statements=DDL_STATEMENTS, operation_id="", + ) + api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_update_ddl_w_operation_id(self): from tests._fixtures import DDL_STATEMENTS + from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest op_future = object() client = _Client() @@ -675,10 +691,14 @@ def test_update_ddl_w_operation_id(self): self.assertIs(future, op_future) + expected_request = UpdateDatabaseDdlRequest( + database=self.DATABASE_NAME, + statements=DDL_STATEMENTS, + operation_id="someOperationId", + ) + api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "someOperationId", + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -696,7 +716,7 @@ def test_drop_grpc_error(self): database.drop() api.drop_database.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -714,7 +734,7 @@ def test_drop_not_found(self): database.drop() api.drop_database.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -731,7 +751,7 @@ def test_drop_success(self): database.drop() api.drop_database.assert_called_once_with( - self.DATABASE_NAME, + database=self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -741,11 +761,11 @@ def _execute_partitioned_dml_helper( from google.api_core.exceptions import Aborted from google.api_core.retry import Retry from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( + from google.cloud.spanner_v1 import ( PartialResultSet, ResultSetStats, ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionSelector, TransactionOptions, @@ -754,6 +774,7 @@ def _execute_partitioned_dml_helper( _make_value_pb, _merge_query_options, ) + from google.cloud.spanner_v1 import ExecuteSqlRequest import collections @@ -792,8 +813,8 @@ def _execute_partitioned_dml_helper( ) api.begin_transaction.assert_called_with( - session.name, - txn_options, + session=session.name, + options=txn_options, metadata=[("google-cloud-resource-prefix", database.name)], ) if retried: @@ -806,7 +827,7 @@ def _execute_partitioned_dml_helper( fields={key: _make_value_pb(value) for (key, value) in params.items()} ) else: - expected_params = None + expected_params = {} expected_transaction = TransactionSelector(id=self.TRANSACTION_ID) expected_query_options = client._query_options @@ -815,26 +836,33 @@ def _execute_partitioned_dml_helper( expected_query_options, query_options ) - api.execute_streaming_sql.assert_any_call( - self.SESSION_NAME, - dml, + expected_request = ExecuteSqlRequest( + session=self.SESSION_NAME, + sql=dml, transaction=expected_transaction, params=expected_params, param_types=param_types, query_options=expected_query_options, + ) + + api.execute_streaming_sql.assert_any_call( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) if retried: expected_retry_transaction = TransactionSelector( id=self.RETRY_TRANSACTION_ID ) - api.execute_streaming_sql.assert_called_with( - self.SESSION_NAME, - dml, + expected_request = 
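[Note on the partitioned-DML hunks] For streaming SQL the flattened argument list likewise collapses into an `ExecuteSqlRequest`. A sketch with placeholder session and DML strings:

    from google.cloud.spanner_v1 import ExecuteSqlRequest, TransactionSelector

    request = ExecuteSqlRequest(
        session="projects/p/instances/i/databases/d/sessions/s",
        sql="UPDATE contacts SET email = NULL WHERE TRUE",  # placeholder DML
        transaction=TransactionSelector(id=b"txn-id"),
    )
    # api.execute_streaming_sql(request=request, metadata=...)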
ExecuteSqlRequest( + session=self.SESSION_NAME, + sql=dml, transaction=expected_retry_transaction, params=expected_params, param_types=param_types, query_options=expected_query_options, + ) + api.execute_streaming_sql.assert_called_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) self.assertEqual(api.execute_streaming_sql.call_count, 2) @@ -854,7 +882,7 @@ def test_execute_partitioned_dml_w_params_and_param_types(self): ) def test_execute_partitioned_dml_w_query_options(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self._execute_partitioned_dml_helper( dml=DML_W_PARAM, @@ -1112,31 +1140,31 @@ def test_restore_success(self): ) def test_is_ready(self): - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Database client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) pool = _Pool() database = self._make_one(self.DATABASE_ID, instance, pool=pool) - database._state = enums.Database.State.READY + database._state = Database.State.READY self.assertTrue(database.is_ready()) - database._state = enums.Database.State.READY_OPTIMIZING + database._state = Database.State.READY_OPTIMIZING self.assertTrue(database.is_ready()) - database._state = enums.Database.State.CREATING + database._state = Database.State.CREATING self.assertFalse(database.is_ready()) def test_is_optimized(self): - from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1 import Database client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) pool = _Pool() database = self._make_one(self.DATABASE_ID, instance, pool=pool) - database._state = enums.Database.State.READY + database._state = Database.State.READY self.assertTrue(database.is_optimized()) - database._state = enums.Database.State.READY_OPTIMIZING + database._state = Database.State.READY_OPTIMIZING self.assertFalse(database.is_optimized()) - database._state = enums.Database.State.CREATING + database._state = Database.State.CREATING self.assertFalse(database.is_optimized()) def test_list_database_operations_grpc_error(self): @@ -1224,7 +1252,7 @@ def _get_target_class(self): @staticmethod def _make_spanner_client(): - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient + from google.cloud.spanner_v1 import SpannerClient return mock.create_autospec(SpannerClient) @@ -1235,8 +1263,8 @@ def test_ctor(self): def test_context_mgr_success(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import TransactionOptions from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.spanner_v1.batch import Batch @@ -1263,7 +1291,7 @@ def test_context_mgr_success(self): expected_txn_options = TransactionOptions(read_write={}) api.commit.assert_called_once_with( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=[], single_use_transaction=expected_txn_options, metadata=[("google-cloud-resource-prefix", database.name)], @@ -1823,40 +1851,14 @@ def test_process_w_query_batch(self): def _make_instance_api(): - from google.cloud.spanner_admin_instance_v1.gapic.instance_admin_client import ( - InstanceAdminClient, - ) + from 
google.cloud.spanner_admin_instance_v1 import InstanceAdminClient return mock.create_autospec(InstanceAdminClient) -class TestRestoreInfo(_BaseTest): - def test_from_pb(self): - from google.cloud.spanner_v1.database import RestoreInfo - from google.cloud.spanner_admin_database_v1.gapic import enums - from google.cloud.spanner_admin_database_v1.proto import ( - backup_pb2, - spanner_database_admin_pb2 as admin_v1_pb2, - ) - from google.cloud._helpers import _datetime_to_pb_timestamp - - timestamp = self._make_timestamp() - restore_pb = admin_v1_pb2.RestoreInfo( - source_type=1, - backup_info=backup_pb2.BackupInfo( - backup="backup_path", - create_time=_datetime_to_pb_timestamp(timestamp), - source_database="database_path", - ), - ) - restore_info = RestoreInfo.from_pb(restore_pb) - self.assertEqual(restore_info.source_type, enums.RestoreSourceType.BACKUP) - self.assertEqual(restore_info.backup_info.create_time, timestamp) - - class _Client(object): def __init__(self, project=TestDatabase.PROJECT_ID): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self.project = project self.project_name = "projects/" + self.project diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index c1a0b187ac..0694d438a2 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -92,12 +92,10 @@ def test_copy(self): self.assertEqual(instance, new_instance) def test__update_from_pb_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance display_name = "display_name" - instance_pb = admin_v1_pb2.Instance(display_name=display_name) + instance_pb = Instance(display_name=display_name) instance = self._make_one(None, None, None, None) self.assertEqual(instance.display_name, None) @@ -105,11 +103,9 @@ def test__update_from_pb_success(self): self.assertEqual(instance.display_name, display_name) def test__update_from_pb_no_display_name(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance - instance_pb = admin_v1_pb2.Instance() + instance_pb = Instance() instance = self._make_one(None, None, None, None) self.assertEqual(instance.display_name, None) with self.assertRaises(ValueError): @@ -117,41 +113,35 @@ def test__update_from_pb_no_display_name(self): self.assertEqual(instance.display_name, None) def test_from_pb_bad_instance_name(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance instance_name = "INCORRECT_FORMAT" - instance_pb = admin_v1_pb2.Instance(name=instance_name) + instance_pb = Instance(name=instance_name) klass = self._getTargetClass() with self.assertRaises(ValueError): klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) self.assertNotEqual(self.PROJECT, ALT_PROJECT) - instance_pb = admin_v1_pb2.Instance(name=self.INSTANCE_NAME) + instance_pb = Instance(name=self.INSTANCE_NAME) klass = self._getTargetClass() with self.assertRaises(ValueError): 
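[Note on the deleted `TestRestoreInfo`] Its removal reflects the broader v2 change that handwritten wrapper classes are gone and callers hold the proto-plus messages directly. A sketch, assuming `BackupInfo` is re-exported at the package root alongside the other types used in these hunks:

    from google.cloud.spanner_admin_database_v1 import (
        BackupInfo,
        RestoreInfo,
        RestoreSourceType,
    )

    restore_info = RestoreInfo(
        source_type=RestoreSourceType.BACKUP,
        backup_info=BackupInfo(backup="backup_path", source_database="database_path"),
    )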
klass.from_pb(instance_pb, client) def test_from_pb_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance client = _Client(project=self.PROJECT) - instance_pb = admin_v1_pb2.Instance( + instance_pb = Instance( name=self.INSTANCE_NAME, config=self.CONFIG_NAME, display_name=self.INSTANCE_ID, @@ -281,12 +271,10 @@ def test_exists_instance_not_found(self): self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) def test_exists_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance client = _Client(self.PROJECT) - instance_pb = admin_v1_pb2.Instance( + instance_pb = Instance( name=self.INSTANCE_NAME, config=self.CONFIG_NAME, display_name=self.DISPLAY_NAME, @@ -331,12 +319,10 @@ def test_reload_instance_not_found(self): self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) def test_reload_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) + from google.cloud.spanner_admin_instance_v1 import Instance client = _Client(self.PROJECT) - instance_pb = admin_v1_pb2.Instance( + instance_pb = Instance( name=self.INSTANCE_NAME, config=self.CONFIG_NAME, display_name=self.DISPLAY_NAME, @@ -498,82 +484,73 @@ def test_database_factory_explicit(self): self.assertIs(pool._bound, database) def test_list_databases(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, - ) - from google.cloud.spanner_v1.database import Database + from google.cloud.spanner_admin_database_v1 import Database as DatabasePB + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListDatabasesRequest + from google.cloud.spanner_admin_database_v1 import ListDatabasesResponse - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) - databases_pb = spanner_database_admin_pb2.ListDatabasesResponse( + databases_pb = ListDatabasesResponse( databases=[ - spanner_database_admin_pb2.Database( - name="{}/databases/aa".format(self.INSTANCE_NAME) - ), - spanner_database_admin_pb2.Database( - name="{}/databases/bb".format(self.INSTANCE_NAME) - ), + DatabasePB(name="{}/databases/aa".format(self.INSTANCE_NAME)), + DatabasePB(name="{}/databases/bb".format(self.INSTANCE_NAME)), ] ) - ld_api = api._inner_api_calls["list_databases"] = mock.Mock( - return_value=databases_pb - ) + ld_api = api._transport._wrapped_methods[ + api._transport.list_databases + ] = mock.Mock(return_value=databases_pb) response = instance.list_databases() databases = list(response) - self.assertIsInstance(databases[0], Database) + self.assertIsInstance(databases[0], DatabasePB) self.assertTrue(databases[0].name.endswith("/aa")) self.assertTrue(databases[1].name.endswith("/bb")) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ld_api.assert_called_once_with( - 
spanner_database_admin_pb2.ListDatabasesRequest(parent=self.INSTANCE_NAME), + ListDatabasesRequest(parent=self.INSTANCE_NAME), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_databases_w_options(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, - ) + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListDatabasesRequest + from google.cloud.spanner_admin_database_v1 import ListDatabasesResponse - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) - databases_pb = spanner_database_admin_pb2.ListDatabasesResponse(databases=[]) + databases_pb = ListDatabasesResponse(databases=[]) - ld_api = api._inner_api_calls["list_databases"] = mock.Mock( - return_value=databases_pb - ) + ld_api = api._transport._wrapped_methods[ + api._transport.list_databases + ] = mock.Mock(return_value=databases_pb) page_size = 42 - page_token = "token" - response = instance.list_databases(page_size=page_size, page_token=page_token) + response = instance.list_databases(page_size=page_size) databases = list(response) self.assertEqual(databases, []) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ld_api.assert_called_once_with( - spanner_database_admin_pb2.ListDatabasesRequest( - parent=self.INSTANCE_NAME, page_size=page_size, page_token=page_token - ), + ListDatabasesRequest(parent=self.INSTANCE_NAME, page_size=page_size), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, @@ -616,76 +593,78 @@ def test_backup_factory_explicit(self): self.assertIs(backup._expire_time, timestamp) def test_list_backups_defaults(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 - from google.cloud.spanner_v1.backup import Backup + from google.cloud.spanner_admin_database_v1 import Backup as BackupPB + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListBackupsRequest + from google.cloud.spanner_admin_database_v1 import ListBackupsResponse - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) - backups_pb = backup_pb2.ListBackupsResponse( + backups_pb = ListBackupsResponse( backups=[ - backup_pb2.Backup(name=instance.name + "/backups/op1"), - backup_pb2.Backup(name=instance.name + "/backups/op2"), - backup_pb2.Backup(name=instance.name + "/backups/op3"), + BackupPB(name=instance.name + "/backups/op1"), + BackupPB(name=instance.name + "/backups/op2"), + BackupPB(name=instance.name + "/backups/op3"), ] ) - ldo_api = api._inner_api_calls["list_backups"] = mock.Mock( - return_value=backups_pb - ) + lbo_api = api._transport._wrapped_methods[ + api._transport.list_backups + ] = mock.Mock(return_value=backups_pb) backups = instance.list_backups() for backup in backups: - self.assertIsInstance(backup, Backup) + self.assertIsInstance(backup, BackupPB) - 
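[Note on the `list_databases` hunks] Stubbing a paged admin RPC changes shape too: the wrapped method now lives on the transport, keyed by the bound RPC, instead of in `_inner_api_calls`. A sketch mirroring the hunk (as in these tests, a `mock.Mock()` stands in for real credentials):

    from unittest import mock

    from google.cloud.spanner_admin_database_v1 import (
        DatabaseAdminClient,
        ListDatabasesResponse,
    )

    api = DatabaseAdminClient(credentials=mock.Mock())
    ld_api = api._transport._wrapped_methods[
        api._transport.list_databases
    ] = mock.Mock(return_value=ListDatabasesResponse(databases=[]))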
expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] - ldo_api.assert_called_once_with( - backup_pb2.ListBackupsRequest(parent=self.INSTANCE_NAME), + ) + lbo_api.assert_called_once_with( + ListBackupsRequest(parent=self.INSTANCE_NAME), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_backups_w_options(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 - from google.cloud.spanner_v1.backup import Backup + from google.cloud.spanner_admin_database_v1 import Backup as BackupPB + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListBackupsRequest + from google.cloud.spanner_admin_database_v1 import ListBackupsResponse - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) - backups_pb = backup_pb2.ListBackupsResponse( + backups_pb = ListBackupsResponse( backups=[ - backup_pb2.Backup(name=instance.name + "/backups/op1"), - backup_pb2.Backup(name=instance.name + "/backups/op2"), - backup_pb2.Backup(name=instance.name + "/backups/op3"), + BackupPB(name=instance.name + "/backups/op1"), + BackupPB(name=instance.name + "/backups/op2"), + BackupPB(name=instance.name + "/backups/op3"), ] ) - ldo_api = api._inner_api_calls["list_backups"] = mock.Mock( - return_value=backups_pb - ) + ldo_api = api._transport._wrapped_methods[ + api._transport.list_backups + ] = mock.Mock(return_value=backups_pb) backups = instance.list_backups(filter_="filter", page_size=10) for backup in backups: - self.assertIsInstance(backup, Backup) + self.assertIsInstance(backup, BackupPB) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ldo_api.assert_called_once_with( - backup_pb2.ListBackupsRequest( + ListBackupsRequest( parent=self.INSTANCE_NAME, filter="filter", page_size=10 ), metadata=expected_metadata, @@ -694,82 +673,86 @@ def test_list_backups_w_options(self): ) def test_list_backup_operations_defaults(self): - from google.api_core.operation import Operation - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import CreateBackupMetadata + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListBackupOperationsRequest + from google.cloud.spanner_admin_database_v1 import ListBackupOperationsResponse from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) create_backup_metadata = Any() - create_backup_metadata.Pack(backup_pb2.CreateBackupMetadata()) + create_backup_metadata.Pack( + CreateBackupMetadata.pb( + CreateBackupMetadata(name="backup", database="database") + ) + ) - operations_pb = backup_pb2.ListBackupOperationsResponse( + operations_pb = 
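[Note on the backup hunks] One naming wrinkle here: the handwritten `Instance.list_backups` keeps its `filter_` parameter, but the generated request message uses the raw proto field `filter`. A sketch with a placeholder filter expression:

    from google.cloud.spanner_admin_database_v1 import ListBackupsRequest

    request = ListBackupsRequest(
        parent="projects/p/instances/i",
        filter='database:"projects/p/instances/i/databases/d"',  # placeholder
        page_size=10,
    )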
ListBackupOperationsResponse( operations=[ operations_pb2.Operation(name="op1", metadata=create_backup_metadata) ] ) - ldo_api = api._inner_api_calls["list_backup_operations"] = mock.Mock( - return_value=operations_pb - ) - - operations = instance.list_backup_operations() + ldo_api = api._transport._wrapped_methods[ + api._transport.list_backup_operations + ] = mock.Mock(return_value=operations_pb) - for op in operations: - self.assertIsInstance(op, Operation) + instance.list_backup_operations() - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ldo_api.assert_called_once_with( - backup_pb2.ListBackupOperationsRequest(parent=self.INSTANCE_NAME), + ListBackupOperationsRequest(parent=self.INSTANCE_NAME), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_backup_operations_w_options(self): - from google.api_core.operation import Operation - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1 import CreateBackupMetadata + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListBackupOperationsRequest + from google.cloud.spanner_admin_database_v1 import ListBackupOperationsResponse from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) create_backup_metadata = Any() - create_backup_metadata.Pack(backup_pb2.CreateBackupMetadata()) + create_backup_metadata.Pack( + CreateBackupMetadata.pb( + CreateBackupMetadata(name="backup", database="database") + ) + ) - operations_pb = backup_pb2.ListBackupOperationsResponse( + operations_pb = ListBackupOperationsResponse( operations=[ operations_pb2.Operation(name="op1", metadata=create_backup_metadata) ] ) - ldo_api = api._inner_api_calls["list_backup_operations"] = mock.Mock( - return_value=operations_pb - ) - - operations = instance.list_backup_operations(filter_="filter", page_size=10) + ldo_api = api._transport._wrapped_methods[ + api._transport.list_backup_operations + ] = mock.Mock(return_value=operations_pb) - for op in operations: - self.assertIsInstance(op, Operation) + instance.list_backup_operations(filter_="filter", page_size=10) - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ldo_api.assert_called_once_with( - backup_pb2.ListBackupOperationsRequest( + ListBackupOperationsRequest( parent=self.INSTANCE_NAME, filter="filter", page_size=10 ), metadata=expected_metadata, @@ -778,30 +761,36 @@ def test_list_backup_operations_w_options(self): ) def test_list_database_operations_defaults(self): - from google.api_core.operation import Operation - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, + from google.cloud.spanner_admin_database_v1 import CreateDatabaseMetadata + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import 
ListDatabaseOperationsRequest + from google.cloud.spanner_admin_database_v1 import ( + ListDatabaseOperationsResponse, + ) + from google.cloud.spanner_admin_database_v1 import ( + OptimizeRestoredDatabaseMetadata, ) from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) create_database_metadata = Any() create_database_metadata.Pack( - spanner_database_admin_pb2.CreateDatabaseMetadata() + CreateDatabaseMetadata.pb(CreateDatabaseMetadata(database="database")) ) optimize_database_metadata = Any() optimize_database_metadata.Pack( - spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata() + OptimizeRestoredDatabaseMetadata.pb( + OptimizeRestoredDatabaseMetadata(name="database") + ) ) - databases_pb = spanner_database_admin_pb2.ListDatabaseOperationsResponse( + databases_pb = ListDatabaseOperationsResponse( operations=[ operations_pb2.Operation(name="op1", metadata=create_database_metadata), operations_pb2.Operation( @@ -810,53 +799,59 @@ def test_list_database_operations_defaults(self): ] ) - ldo_api = api._inner_api_calls["list_database_operations"] = mock.Mock( - return_value=databases_pb - ) + ldo_api = api._transport._wrapped_methods[ + api._transport.list_database_operations + ] = mock.Mock(return_value=databases_pb) - operations = instance.list_database_operations() + instance.list_database_operations() - for op in operations: - self.assertIsInstance(op, Operation) - - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ldo_api.assert_called_once_with( - spanner_database_admin_pb2.ListDatabaseOperationsRequest( - parent=self.INSTANCE_NAME - ), + ListDatabaseOperationsRequest(parent=self.INSTANCE_NAME), metadata=expected_metadata, retry=mock.ANY, timeout=mock.ANY, ) def test_list_database_operations_w_options(self): - from google.api_core.operation import Operation - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, - ) + from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient + from google.cloud.spanner_admin_database_v1 import ListDatabaseOperationsRequest + from google.cloud.spanner_admin_database_v1 import ( + ListDatabaseOperationsResponse, + ) + from google.cloud.spanner_admin_database_v1 import RestoreDatabaseMetadata + from google.cloud.spanner_admin_database_v1 import RestoreSourceType + from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlMetadata from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - api = database_admin_client.DatabaseAdminClient(mock.Mock()) + api = DatabaseAdminClient(credentials=mock.Mock()) client = _Client(self.PROJECT) client.database_admin_api = api instance = self._make_one(self.INSTANCE_ID, client) restore_database_metadata = Any() restore_database_metadata.Pack( - spanner_database_admin_pb2.RestoreDatabaseMetadata() + RestoreDatabaseMetadata.pb( + RestoreDatabaseMetadata( + name="database", source_type=RestoreSourceType.BACKUP + ) + ) ) update_database_metadata = Any() update_database_metadata.Pack( - spanner_database_admin_pb2.UpdateDatabaseDdlMetadata() + UpdateDatabaseDdlMetadata.pb( + 
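[Note on the `.pb()` calls threaded through these fixtures] They are the standard proto-plus escape hatch: `Any.Pack` needs the raw protobuf message, not the wrapper, and `MessageClass.pb(instance)` exposes it. A sketch:

    from google.protobuf.any_pb2 import Any
    from google.cloud.spanner_admin_database_v1 import CreateDatabaseMetadata

    metadata = Any()
    wrapper = CreateDatabaseMetadata(database="database")
    # .pb() returns the underlying pb2 message that Any.Pack understands.
    metadata.Pack(CreateDatabaseMetadata.pb(wrapper))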
UpdateDatabaseDdlMetadata( + database="database", statements=["statements"] + ) + ) ) - databases_pb = spanner_database_admin_pb2.ListDatabaseOperationsResponse( + databases_pb = ListDatabaseOperationsResponse( operations=[ operations_pb2.Operation( name="op1", metadata=restore_database_metadata @@ -865,21 +860,18 @@ def test_list_database_operations_w_options(self): ] ) - ldo_api = api._inner_api_calls["list_database_operations"] = mock.Mock( - return_value=databases_pb - ) + ldo_api = api._transport._wrapped_methods[ + api._transport.list_database_operations + ] = mock.Mock(return_value=databases_pb) - operations = instance.list_database_operations(filter_="filter", page_size=10) + instance.list_database_operations(filter_="filter", page_size=10) - for op in operations: - self.assertIsInstance(op, Operation) - - expected_metadata = [ + expected_metadata = ( ("google-cloud-resource-prefix", instance.name), ("x-goog-request-params", "parent={}".format(instance.name)), - ] + ) ldo_api.assert_called_once_with( - spanner_database_admin_pb2.ListDatabaseOperationsRequest( + ListDatabaseOperationsRequest( parent=self.INSTANCE_NAME, filter="filter", page_size=10 ), metadata=expected_metadata, @@ -888,8 +880,8 @@ def test_list_database_operations_w_options(self): ) def test_type_string_to_type_pb_hit(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, + from google.cloud.spanner_admin_database_v1 import ( + OptimizeRestoredDatabaseMetadata, ) from google.cloud.spanner_v1 import instance @@ -897,7 +889,7 @@ def test_type_string_to_type_pb_hit(self): self.assertIn(type_string, instance._OPERATION_METADATA_TYPES) self.assertEqual( instance._type_string_to_type_pb(type_string), - spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata, + OptimizeRestoredDatabaseMetadata, ) def test_type_string_to_type_pb_miss(self): diff --git a/tests/unit/test_keyset.py b/tests/unit/test_keyset.py index ed1473bf01..86a814c752 100644 --- a/tests/unit/test_keyset.py +++ b/tests/unit/test_keyset.py @@ -115,47 +115,32 @@ def test___eq___other(self): self.assertNotEqual(krange, other) def test_to_pb_w_start_closed_and_end_open(self): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange + from google.cloud.spanner_v1.types.keys import KeyRange as KeyRangePB key1 = u"key_1" key2 = u"key_2" key_range = self._make_one(start_closed=[key1], end_open=[key2]) key_range_pb = key_range._to_pb() - expected = KeyRange( - start_closed=ListValue(values=[Value(string_value=key1)]), - end_open=ListValue(values=[Value(string_value=key2)]), - ) + expected = KeyRangePB(start_closed=[key1], end_open=[key2],) self.assertEqual(key_range_pb, expected) def test_to_pb_w_start_open_and_end_closed(self): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange + from google.cloud.spanner_v1.types.keys import KeyRange as KeyRangePB key1 = u"key_1" key2 = u"key_2" key_range = self._make_one(start_open=[key1], end_closed=[key2]) key_range_pb = key_range._to_pb() - expected = KeyRange( - start_open=ListValue(values=[Value(string_value=key1)]), - end_closed=ListValue(values=[Value(string_value=key2)]), - ) + expected = KeyRangePB(start_open=[key1], end_closed=[key2]) self.assertEqual(key_range_pb, expected) def test_to_pb_w_empty_list(self): - from google.protobuf.struct_pb2 import ListValue - from 
google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange + from google.cloud.spanner_v1.types.keys import KeyRange as KeyRangePB key = u"key" key_range = self._make_one(start_closed=[], end_closed=[key]) key_range_pb = key_range._to_pb() - expected = KeyRange( - start_closed=ListValue(values=[]), - end_closed=ListValue(values=[Value(string_value=key)]), - ) + expected = KeyRangePB(start_closed=[], end_closed=[key]) self.assertEqual(key_range_pb, expected) def test_to_dict_w_start_closed_and_end_open(self): @@ -288,37 +273,38 @@ def test___eq___w_ranges_miss(self): self.assertNotEqual(keyset, other) def test_to_pb_w_all(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet + from google.cloud.spanner_v1 import KeySetPB keyset = self._make_one(all_=True) result = keyset._to_pb() - self.assertIsInstance(result, KeySet) - self.assertTrue(result.all) + self.assertIsInstance(result, KeySetPB) + self.assertTrue(result.all_) self.assertEqual(len(result.keys), 0) self.assertEqual(len(result.ranges), 0) def test_to_pb_w_only_keys(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet + from google.cloud.spanner_v1 import KeySetPB KEYS = [[u"key1"], [u"key2"]] keyset = self._make_one(keys=KEYS) result = keyset._to_pb() - self.assertIsInstance(result, KeySet) - self.assertFalse(result.all) + self.assertIsInstance(result, KeySetPB) + self.assertFalse(result.all_) self.assertEqual(len(result.keys), len(KEYS)) for found, expected in zip(result.keys, KEYS): self.assertEqual(len(found), len(expected)) - self.assertEqual(found.values[0].string_value, expected[0]) + self.assertEqual(found[0], expected[0]) self.assertEqual(len(result.ranges), 0) def test_to_pb_w_only_ranges(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet + from google.cloud.spanner_v1 import KeyRangePB + from google.cloud.spanner_v1 import KeySetPB from google.cloud.spanner_v1.keyset import KeyRange KEY_1 = u"KEY_1" @@ -333,13 +319,17 @@ def test_to_pb_w_only_ranges(self): result = keyset._to_pb() - self.assertIsInstance(result, KeySet) - self.assertFalse(result.all) + self.assertIsInstance(result, KeySetPB) + self.assertFalse(result.all_) self.assertEqual(len(result.keys), 0) self.assertEqual(len(result.ranges), len(RANGES)) - for found, expected in zip(result.ranges, RANGES): - self.assertEqual(found, expected._to_pb()) + expected_ranges = [ + KeyRangePB(start_open=KEY_1, end_closed=KEY_2), + KeyRangePB(start_closed=KEY_3, end_open=KEY_4), + ] + for found, expected in zip(result.ranges, expected_ranges): + self.assertEqual(found, expected) def test_to_dict_w_all(self): keyset = self._make_one(all_=True) diff --git a/tests/unit/test_param_types.py b/tests/unit/test_param_types.py index cb1c548af9..0d6a17c613 100644 --- a/tests/unit/test_param_types.py +++ b/tests/unit/test_param_types.py @@ -18,11 +18,12 @@ class Test_ArrayParamType(unittest.TestCase): def test_it(self): - from google.cloud.spanner_v1.proto import type_pb2 + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1 import param_types - expected = type_pb2.Type( - code=type_pb2.ARRAY, array_element_type=type_pb2.Type(code=type_pb2.INT64) + expected = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) ) found = param_types.Array(param_types.INT64) @@ -32,20 +33,18 @@ def test_it(self): class Test_Struct(unittest.TestCase): def test_it(self): - from google.cloud.spanner_v1.proto import type_pb2 + from 
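[Note on the keyset hunks] They drop the manual `ListValue`/`Value` plumbing: proto-plus key ranges accept plain Python lists, and the reserved-name field `all` surfaces as `all_` on the new `KeySetPB`/`KeyRangePB` aliases. A sketch:

    from google.cloud.spanner_v1 import KeyRangePB, KeySetPB

    key_range = KeyRangePB(start_closed=["key_1"], end_open=["key_2"])
    key_set = KeySetPB(all_=True)
    assert key_set.all_ and not key_set.keys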
google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + from google.cloud.spanner_v1 import StructType from google.cloud.spanner_v1 import param_types - struct_type = type_pb2.StructType( + struct_type = StructType( fields=[ - type_pb2.StructType.Field( - name="name", type=type_pb2.Type(code=type_pb2.STRING) - ), - type_pb2.StructType.Field( - name="count", type=type_pb2.Type(code=type_pb2.INT64) - ), + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="count", type_=Type(code=TypeCode.INT64)), ] ) - expected = type_pb2.Type(code=type_pb2.STRUCT, struct_type=struct_type) + expected = Type(code=TypeCode.STRUCT, struct_type=struct_type) found = param_types.Struct( [ diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index 6898314955..f4f5675356 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -886,18 +886,19 @@ def __init__(self, name): self.name = name self._sessions = [] - def mock_batch_create_sessions(db, session_count=10, timeout=10, metadata=[]): - from google.cloud.spanner_v1.proto import spanner_pb2 + def mock_batch_create_sessions( + database=None, session_count=10, timeout=10, metadata=[] + ): + from google.cloud.spanner_v1 import BatchCreateSessionsResponse + from google.cloud.spanner_v1 import Session - response = spanner_pb2.BatchCreateSessionsResponse() if session_count < 2: - response.session.add() + response = BatchCreateSessionsResponse(session=[Session()]) else: - response.session.add() - response.session.add() + response = BatchCreateSessionsResponse(session=[Session(), Session()]) return response - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient + from google.cloud.spanner_v1 import SpannerClient self.spanner_api = mock.create_autospec(SpannerClient, instance=True) self.spanner_api.batch_create_sessions.side_effect = mock_batch_create_sessions diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index e95b9e1a06..0a004e3cd0 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -46,9 +46,9 @@ class TestSession(OpenTelemetryBase): SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID BASE_ATTRIBUTES = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", "db.instance": DATABASE_NAME, - "net.host.name": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com", } def _getTargetClass(self): @@ -69,12 +69,12 @@ def _make_database(name=DATABASE_NAME): @staticmethod def _make_session_pb(name, labels=None): - from google.cloud.spanner_v1.proto.spanner_pb2 import Session + from google.cloud.spanner_v1 import Session return Session(name=name, labels=labels) def _make_spanner_api(self): - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient + from google.cloud.spanner_v1 import SpannerClient return mock.Mock(autospec=SpannerClient, instance=True) @@ -125,6 +125,8 @@ def test_create_w_session_id(self): self.assertNoSpans() def test_create_ok(self): + from google.cloud.spanner_v1 import CreateSessionRequest + session_pb = self._make_session_pb(self.SESSION_NAME) gax_api = self._make_spanner_api() gax_api.create_session.return_value = session_pb @@ -136,8 +138,10 @@ def test_create_ok(self): self.assertEqual(session.session_id, self.SESSION_ID) + request = CreateSessionRequest(database=database.name,) + gax_api.create_session.assert_called_once_with( - database.name, metadata=[("google-cloud-resource-prefix", database.name)] + 
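[Note on the param-types hunks] `type` collides with the Python builtin, so the generator emits `type_` for both the keyword and the attribute, and the bare `type_pb2` constants become `TypeCode` enum members. A sketch:

    from google.cloud.spanner_v1 import StructType, Type, TypeCode

    field = StructType.Field(name="name", type_=Type(code=TypeCode.STRING))
    assert field.type_.code == TypeCode.STRING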
request=request, metadata=[("google-cloud-resource-prefix", database.name)] ) self.assertSpanAttributes( @@ -145,6 +149,9 @@ def test_create_ok(self): ) def test_create_w_labels(self): + from google.cloud.spanner_v1 import CreateSessionRequest + from google.cloud.spanner_v1 import Session as SessionPB + labels = {"foo": "bar"} session_pb = self._make_session_pb(self.SESSION_NAME, labels=labels) gax_api = self._make_spanner_api() @@ -157,10 +164,12 @@ def test_create_w_labels(self): self.assertEqual(session.session_id, self.SESSION_ID) + request = CreateSessionRequest( + database=database.name, session=SessionPB(labels=labels), + ) + gax_api.create_session.assert_called_once_with( - database.name, - session={"labels": labels}, - metadata=[("google-cloud-resource-prefix", database.name)], + request=request, metadata=[("google-cloud-resource-prefix", database.name)], ) self.assertSpanAttributes( @@ -205,7 +214,7 @@ def test_exists_hit(self): self.assertTrue(session.exists()) gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -214,6 +223,28 @@ def test_exists_hit(self): attributes=dict(TestSession.BASE_ATTRIBUTES, session_found=True), ) + @mock.patch( + "google.cloud.spanner_v1._opentelemetry_tracing.HAS_OPENTELEMETRY_INSTALLED", + False, + ) + def test_exists_hit_wo_span(self): + session_pb = self._make_session_pb(self.SESSION_NAME) + gax_api = self._make_spanner_api() + gax_api.get_session.return_value = session_pb + database = self._make_database() + database.spanner_api = gax_api + session = self._make_one(database) + session._session_id = self.SESSION_ID + + self.assertTrue(session.exists()) + + gax_api.get_session.assert_called_once_with( + name=self.SESSION_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) + + self.assertNoSpans() + def test_exists_miss(self): from google.api_core.exceptions import NotFound @@ -227,7 +258,7 @@ def test_exists_miss(self): self.assertFalse(session.exists()) gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -236,6 +267,29 @@ def test_exists_miss(self): attributes=dict(TestSession.BASE_ATTRIBUTES, session_found=False), ) + @mock.patch( + "google.cloud.spanner_v1._opentelemetry_tracing.HAS_OPENTELEMETRY_INSTALLED", + False, + ) + def test_exists_miss_wo_span(self): + from google.api_core.exceptions import NotFound + + gax_api = self._make_spanner_api() + gax_api.get_session.side_effect = NotFound("testing") + database = self._make_database() + database.spanner_api = gax_api + session = self._make_one(database) + session._session_id = self.SESSION_ID + + self.assertFalse(session.exists()) + + gax_api.get_session.assert_called_once_with( + name=self.SESSION_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) + + self.assertNoSpans() + def test_exists_error(self): from google.api_core.exceptions import Unknown @@ -250,7 +304,7 @@ def test_exists_error(self): session.exists() gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -267,6 +321,8 @@ def test_ping_wo_session_id(self): session.ping() def test_ping_hit(self): + from google.cloud.spanner_v1 import ExecuteSqlRequest + gax_api = self._make_spanner_api() gax_api.execute_sql.return_value = "1" database = self._make_database() @@ -276,14 +332,15 @@ def 
test_ping_hit(self): session.ping() + request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",) + gax_api.execute_sql.assert_called_once_with( - self.SESSION_NAME, - "SELECT 1", - metadata=[("google-cloud-resource-prefix", database.name)], + request=request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_ping_miss(self): from google.api_core.exceptions import NotFound + from google.cloud.spanner_v1 import ExecuteSqlRequest gax_api = self._make_spanner_api() gax_api.execute_sql.side_effect = NotFound("testing") @@ -295,14 +352,15 @@ def test_ping_miss(self): with self.assertRaises(NotFound): session.ping() + request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",) + gax_api.execute_sql.assert_called_once_with( - self.SESSION_NAME, - "SELECT 1", - metadata=[("google-cloud-resource-prefix", database.name)], + request=request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_ping_error(self): from google.api_core.exceptions import Unknown + from google.cloud.spanner_v1 import ExecuteSqlRequest gax_api = self._make_spanner_api() gax_api.execute_sql.side_effect = Unknown("testing") @@ -314,10 +372,10 @@ def test_ping_error(self): with self.assertRaises(Unknown): session.ping() + request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",) + gax_api.execute_sql.assert_called_once_with( - self.SESSION_NAME, - "SELECT 1", - metadata=[("google-cloud-resource-prefix", database.name)], + request=request, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_delete_wo_session_id(self): @@ -340,7 +398,7 @@ def test_delete_hit(self): session.delete() gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -362,7 +420,7 @@ def test_delete_miss(self): session.delete() gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -386,7 +444,7 @@ def test_delete_error(self): session.delete() gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, + name=self.SESSION_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -497,7 +555,7 @@ def test_execute_sql_defaults(self): def test_execute_sql_non_default_retry(self): from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1.proto.type_pb2 import STRING + from google.cloud.spanner_v1 import TypeCode SQL = "SELECT first_name, age FROM citizens" database = self._make_database() @@ -505,7 +563,7 @@ def test_execute_sql_non_default_retry(self): session._session_id = "DEADBEEF" params = Struct(fields={"foo": Value(string_value="bar")}) - param_types = {"foo": STRING} + param_types = {"foo": TypeCode.STRING} with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: found = session.execute_sql( @@ -526,7 +584,7 @@ def test_execute_sql_non_default_retry(self): def test_execute_sql_explicit(self): from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1.proto.type_pb2 import STRING + from google.cloud.spanner_v1 import TypeCode SQL = "SELECT first_name, age FROM citizens" database = self._make_database() @@ -534,7 +592,7 @@ def test_execute_sql_explicit(self): session._session_id = "DEADBEEF" params = Struct(fields={"foo": Value(string_value="bar")}) - param_types = {"foo": STRING} + param_types = {"foo": TypeCode.STRING} with 
mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: found = session.execute_sql(SQL, params, param_types, "PLAN") @@ -602,7 +660,7 @@ def test_transaction_w_existing_txn(self): self.assertTrue(existing.rolled_back) def test_run_in_transaction_callback_raises_non_gax_error(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -648,19 +706,19 @@ def unit_of_work(txn, *args, **kw): expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) gax_api.rollback.assert_called_once_with( - self.SESSION_NAME, - TRANSACTION_ID, + session=self.SESSION_NAME, + transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], ) def test_run_in_transaction_callback_raises_non_abort_rpc_error(self): from google.api_core.exceptions import Cancelled - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -703,16 +761,16 @@ def unit_of_work(txn, *args, **kw): expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) gax_api.rollback.assert_not_called() def test_run_in_transaction_w_args_w_kwargs_wo_abort(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -758,12 +816,12 @@ def unit_of_work(txn, *args, **kw): expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) gax_api.commit.assert_called_once_with( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -810,7 +868,7 @@ def unit_of_work(txn, *args, **kw): gax_api.begin_transaction.assert_not_called() gax_api.commit.assert_called_once_with( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -819,8 +877,8 @@ def unit_of_work(txn, *args, **kw): def test_run_in_transaction_w_abort_no_retry_metadata(self): import datetime from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -869,8 +927,8 @@ def unit_of_work(txn, *args, **kw): gax_api.begin_transaction.call_args_list, [ mock.call( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, 
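[Note on the session hunks] They migrate every transaction RPC to keyword arguments. A sketch of the new shapes, where `api`, `session_name`, `mutations`, and `txn_id` are placeholders for the autospecced client and its inputs:

    from google.cloud.spanner_v1 import TransactionOptions

    options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    # api.begin_transaction(session=session_name, options=options, metadata=...)
    # api.commit(session=session_name, mutations=mutations,
    #            transaction_id=txn_id, metadata=...)
    # api.rollback(session=session_name, transaction_id=txn_id, metadata=...)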
metadata=[("google-cloud-resource-prefix", database.name)], ) ] @@ -880,7 +938,7 @@ def unit_of_work(txn, *args, **kw): gax_api.commit.call_args_list, [ mock.call( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -894,8 +952,8 @@ def test_run_in_transaction_w_abort_w_retry_metadata(self): from google.api_core.exceptions import Aborted from google.protobuf.duration_pb2 import Duration from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -957,8 +1015,8 @@ def unit_of_work(txn, *args, **kw): gax_api.begin_transaction.call_args_list, [ mock.call( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) ] @@ -968,7 +1026,7 @@ def unit_of_work(txn, *args, **kw): gax_api.commit.call_args_list, [ mock.call( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -982,8 +1040,8 @@ def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self): from google.api_core.exceptions import Aborted from google.protobuf.duration_pb2 import Duration from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -1045,15 +1103,15 @@ def unit_of_work(txn, *args, **kw): gax_api.begin_transaction.call_args_list, [ mock.call( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) ] * 2, ) gax_api.commit.assert_called_once_with( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -1064,8 +1122,8 @@ def test_run_in_transaction_w_abort_w_retry_metadata_deadline(self): from google.api_core.exceptions import Aborted from google.protobuf.duration_pb2 import Duration from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -1135,12 +1193,12 @@ def _time(_results=[1, 1.5]): expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) gax_api.commit.assert_called_once_with( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], @@ -1148,7 +1206,7 @@ def _time(_results=[1, 1.5]): def test_run_in_transaction_w_timeout(self): 
from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -1210,8 +1268,8 @@ def _time(_results=[1, 2, 4, 8]): gax_api.begin_transaction.call_args_list, [ mock.call( - self.SESSION_NAME, - expected_options, + session=self.SESSION_NAME, + options=expected_options, metadata=[("google-cloud-resource-prefix", database.name)], ) ] @@ -1221,7 +1279,7 @@ def _time(_results=[1, 2, 4, 8]): gax_api.commit.call_args_list, [ mock.call( - self.SESSION_NAME, + session=self.SESSION_NAME, mutations=txn._mutations, transaction_id=TRANSACTION_ID, metadata=[("google-cloud-resource-prefix", database.name)], diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 8589a0c363..5250e41c95 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -20,6 +20,7 @@ StatusCanonicalCode, HAS_OPENTELEMETRY_INSTALLED, ) +from google.cloud.spanner_v1.param_types import INT64 TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] @@ -28,7 +29,7 @@ SQL_QUERY_WITH_PARAM = """ SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age""" PARAMS = {"max_age": 30} -PARAM_TYPES = {"max_age": "INT64"} +PARAM_TYPES = {"max_age": INT64} SQL_QUERY_WITH_BYTES_PARAM = """\ SELECT image_name FROM images WHERE @bytes IN image_data""" PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"} @@ -38,9 +39,9 @@ MICROS = 123456 BASE_ATTRIBUTES = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", "db.instance": "testing", - "net.host.name": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com", } @@ -283,12 +284,12 @@ def test_iteration_w_multiple_span_creation(self): for span in span_list: self.assertEqual(span.name, name) self.assertEqual( - span.attributes, + dict(span.attributes), { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", "db.instance": "testing", - "net.host.name": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com", }, ) @@ -318,7 +319,7 @@ class _Derived(self._getTargetClass()): _multi_use = False def _make_txn_selector(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( TransactionOptions, TransactionSelector, ) @@ -335,11 +336,9 @@ def _make_txn_selector(self): return _Derived(session) def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client + from google.cloud.spanner_v1 import SpannerClient - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) + return mock.create_autospec(SpannerClient, instance=True) def test_ctor(self): session = _Session() @@ -378,26 +377,26 @@ def test_read_other_error(self): def _read_helper(self, multi_use, first=True, count=0, partition=None): from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( + from google.cloud.spanner_v1 import ( PartialResultSet, ResultSetMetadata, ResultSetStats, ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( TransactionSelector, TransactionOptions, ) - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 + from google.cloud.spanner_v1 import ReadRequest + from google.cloud.spanner_v1 import Type, 
StructType + from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1._helpers import _make_value_pb VALUES = [[u"bharney", 31], [u"phred", 32]] - VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] struct_type_pb = StructType( fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), + StructType.Field(name="name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), ] ) metadata_pb = ResultSetMetadata(row_type=struct_type_pb) @@ -405,9 +404,11 @@ def _read_helper(self, multi_use, first=True, count=0, partition=None): query_stats=Struct(fields={"rows_returned": _make_value_pb(2)}) ) result_sets = [ - PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb), - PartialResultSet(values=VALUE_PBS[1], stats=stats_pb), + PartialResultSet(metadata=metadata_pb), + PartialResultSet(stats=stats_pb), ] + for i in range(len(result_sets)): + result_sets[i].values.extend(VALUES[i]) KEYS = [["bharney@example.com"], ["phred@example.com"]] keyset = KeySet(keys=KEYS) INDEX = "email-address-index" @@ -459,15 +460,18 @@ def _read_helper(self, multi_use, first=True, count=0, partition=None): else: expected_limit = LIMIT - api.streaming_read.assert_called_once_with( - self.SESSION_NAME, - TABLE_NAME, - COLUMNS, - keyset._to_pb(), + expected_request = ReadRequest( + session=self.SESSION_NAME, + table=TABLE_NAME, + columns=COLUMNS, + key_set=keyset._to_pb(), transaction=expected_transaction, index=INDEX, limit=expected_limit, partition_token=partition, + ) + api.streaming_read.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -539,30 +543,30 @@ def _execute_sql_helper( retry=google.api_core.gapic_v1.method.DEFAULT, ): from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( + from google.cloud.spanner_v1 import ( PartialResultSet, ResultSetMetadata, ResultSetStats, ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( TransactionSelector, TransactionOptions, ) - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 + from google.cloud.spanner_v1 import ExecuteSqlRequest + from google.cloud.spanner_v1 import Type, StructType + from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1._helpers import ( _make_value_pb, _merge_query_options, ) VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]] - VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] MODE = 2 # PROFILE struct_type_pb = StructType( fields=[ - StructType.Field(name="first_name", type=Type(code=STRING)), - StructType.Field(name="last_name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), + StructType.Field(name="first_name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="last_name", type_=Type(code=TypeCode.STRING)), + StructType.Field(name="age", type_=Type(code=TypeCode.INT64)), ] ) metadata_pb = ResultSetMetadata(row_type=struct_type_pb) @@ -570,9 +574,11 @@ def _execute_sql_helper( query_stats=Struct(fields={"rows_returned": _make_value_pb(2)}) ) result_sets = [ - PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb), - PartialResultSet(values=VALUE_PBS[1], stats=stats_pb), + 
PartialResultSet(metadata=metadata_pb), + PartialResultSet(stats=stats_pb), ] + for i in range(len(result_sets)): + result_sets[i].values.extend(VALUES[i]) iterator = _MockIterator(*result_sets) database = _Database() api = database.spanner_api = self._make_spanner_api() @@ -629,9 +635,9 @@ def _execute_sql_helper( expected_query_options, query_options ) - api.execute_streaming_sql.assert_called_once_with( - self.SESSION_NAME, - SQL_QUERY_WITH_PARAM, + expected_request = ExecuteSqlRequest( + session=self.SESSION_NAME, + sql=SQL_QUERY_WITH_PARAM, transaction=expected_transaction, params=expected_params, param_types=PARAM_TYPES, @@ -639,6 +645,9 @@ def _execute_sql_helper( query_options=expected_query_options, partition_token=partition, seqno=sql_count, + ) + api.execute_streaming_sql.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], timeout=timeout, retry=retry, @@ -679,7 +688,7 @@ def test_execute_sql_w_timeout(self): self._execute_sql_helper(multi_use=False, timeout=None) def test_execute_sql_w_query_options(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self._execute_sql_helper( multi_use=False, @@ -690,11 +699,12 @@ def _partition_read_helper( self, multi_use, w_txn, size=None, max_partitions=None, index=None ): from google.cloud.spanner_v1.keyset import KeySet - from google.cloud.spanner_v1.types import Partition - from google.cloud.spanner_v1.types import PartitionOptions - from google.cloud.spanner_v1.types import PartitionResponse - from google.cloud.spanner_v1.types import Transaction - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector + from google.cloud.spanner_v1 import Partition + from google.cloud.spanner_v1 import PartitionOptions + from google.cloud.spanner_v1 import PartitionReadRequest + from google.cloud.spanner_v1 import PartitionResponse + from google.cloud.spanner_v1 import Transaction + from google.cloud.spanner_v1 import TransactionSelector keyset = KeySet(all_=True) new_txn_id = b"ABECAB91" @@ -735,7 +745,7 @@ def _partition_read_helper( partition_size_bytes=size, max_partitions=max_partitions ) - api.partition_read.assert_called_once_with( + expected_request = PartitionReadRequest( session=self.SESSION_NAME, table=TABLE_NAME, columns=COLUMNS, @@ -743,6 +753,9 @@ def _partition_read_helper( transaction=expected_txn_selector, index=index, partition_options=expected_partition_options, + ) + api.partition_read.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -796,11 +809,12 @@ def test_partition_read_ok_w_max_partitions(self): def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None): from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.types import Partition - from google.cloud.spanner_v1.types import PartitionOptions - from google.cloud.spanner_v1.types import PartitionResponse - from google.cloud.spanner_v1.types import Transaction - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector + from google.cloud.spanner_v1 import Partition + from google.cloud.spanner_v1 import PartitionOptions + from google.cloud.spanner_v1 import PartitionQueryRequest + from google.cloud.spanner_v1 import PartitionResponse + from google.cloud.spanner_v1 import Transaction + from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1._helpers 
import _make_value_pb new_txn_id = b"ABECAB91" @@ -844,13 +858,16 @@ def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=No partition_size_bytes=size, max_partitions=max_partitions ) - api.partition_query.assert_called_once_with( + expected_request = PartitionQueryRequest( session=self.SESSION_NAME, sql=SQL_QUERY_WITH_PARAM, transaction=expected_txn_selector, params=expected_params, param_types=PARAM_TYPES, partition_options=expected_partition_options, + ) + api.partition_query.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -927,11 +944,9 @@ def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client + from google.cloud.spanner_v1 import SpannerClient - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) + return mock.create_autospec(SpannerClient, instance=True) def _makeTimestamp(self): import datetime @@ -1083,7 +1098,10 @@ def test__make_txn_selector_w_read_timestamp(self): selector = snapshot._make_txn_selector() options = selector.single_use self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp + _pb_timestamp_to_datetime( + type(options).pb(options).read_only.read_timestamp + ), + timestamp, ) def test__make_txn_selector_w_min_read_timestamp(self): @@ -1095,7 +1113,10 @@ def test__make_txn_selector_w_min_read_timestamp(self): selector = snapshot._make_txn_selector() options = selector.single_use self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp + _pb_timestamp_to_datetime( + type(options).pb(options).read_only.min_read_timestamp + ), + timestamp, ) def test__make_txn_selector_w_max_staleness(self): @@ -1104,8 +1125,10 @@ def test__make_txn_selector_w_max_staleness(self): snapshot = self._make_one(session, max_staleness=duration) selector = snapshot._make_txn_selector() options = selector.single_use - self.assertEqual(options.read_only.max_staleness.seconds, 3) - self.assertEqual(options.read_only.max_staleness.nanos, 123456000) + self.assertEqual(type(options).pb(options).read_only.max_staleness.seconds, 3) + self.assertEqual( + type(options).pb(options).read_only.max_staleness.nanos, 123456000 + ) def test__make_txn_selector_w_exact_staleness(self): duration = self._makeDuration(seconds=3, microseconds=123456) @@ -1113,8 +1136,10 @@ def test__make_txn_selector_w_exact_staleness(self): snapshot = self._make_one(session, exact_staleness=duration) selector = snapshot._make_txn_selector() options = selector.single_use - self.assertEqual(options.read_only.exact_staleness.seconds, 3) - self.assertEqual(options.read_only.exact_staleness.nanos, 123456000) + self.assertEqual(type(options).pb(options).read_only.exact_staleness.seconds, 3) + self.assertEqual( + type(options).pb(options).read_only.exact_staleness.nanos, 123456000 + ) def test__make_txn_selector_strong_w_multi_use(self): session = _Session() @@ -1132,7 +1157,10 @@ def test__make_txn_selector_w_read_timestamp_w_multi_use(self): selector = snapshot._make_txn_selector() options = selector.begin self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp + _pb_timestamp_to_datetime( + type(options).pb(options).read_only.read_timestamp + ), + timestamp, ) def test__make_txn_selector_w_exact_staleness_w_multi_use(self): @@ -1141,8 +1169,10 @@ def 
test__make_txn_selector_w_exact_staleness_w_multi_use(self): snapshot = self._make_one(session, exact_staleness=duration, multi_use=True) selector = snapshot._make_txn_selector() options = selector.begin - self.assertEqual(options.read_only.exact_staleness.seconds, 3) - self.assertEqual(options.read_only.exact_staleness.nanos, 123456000) + self.assertEqual(type(options).pb(options).read_only.exact_staleness.seconds, 3) + self.assertEqual( + type(options).pb(options).read_only.exact_staleness.nanos, 123456000 + ) def test_begin_wo_multi_use(self): session = _Session() @@ -1183,7 +1213,7 @@ def test_begin_w_other_error(self): def test_begin_ok_exact_staleness(self): from google.protobuf.duration_pb2 import Duration - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -1207,8 +1237,8 @@ def test_begin_ok_exact_staleness(self): ) api.begin_transaction.assert_called_once_with( - session.name, - expected_txn_options, + session=session.name, + options=expected_txn_options, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -1219,7 +1249,7 @@ def test_begin_ok_exact_staleness(self): ) def test_begin_ok_exact_strong(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( + from google.cloud.spanner_v1 import ( Transaction as TransactionPB, TransactionOptions, ) @@ -1241,8 +1271,8 @@ def test_begin_ok_exact_strong(self): ) api.begin_transaction.assert_called_once_with( - session.name, - expected_txn_options, + session=session.name, + options=expected_txn_options, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -1255,7 +1285,7 @@ def test_begin_ok_exact_strong(self): class _Client(object): def __init__(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") diff --git a/tests/unit/test_streamed.py b/tests/unit/test_streamed.py index d53ba3b21d..4a31c5d179 100644 --- a/tests/unit/test_streamed.py +++ b/tests/unit/test_streamed.py @@ -54,32 +54,34 @@ def test_fields_unset(self): @staticmethod def _make_scalar_field(name, type_): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import Type - return StructType.Field(name=name, type=Type(code=type_)) + return StructType.Field(name=name, type_=Type(code=type_)) @staticmethod def _make_array_field(name, element_type_code=None, element_type=None): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode if element_type is None: element_type = Type(code=element_type_code) - array_type = Type(code="ARRAY", array_element_type=element_type) - return StructType.Field(name=name, type=array_type) + array_type = Type(code=TypeCode.ARRAY, array_element_type=element_type) + return StructType.Field(name=name, type_=array_type) @staticmethod def _make_struct_type(struct_type_fields): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import Type + from 
google.cloud.spanner_v1 import TypeCode fields = [ - StructType.Field(name=key, type=Type(code=value)) + StructType.Field(name=key, type_=Type(code=value)) for key, value in struct_type_fields ] struct_type = StructType(fields=fields) - return Type(code="STRUCT", struct_type=struct_type) + return Type(code=TypeCode.STRUCT, struct_type=struct_type) @staticmethod def _make_value(value): @@ -87,30 +89,21 @@ def _make_value(value): return _make_value_pb(value) - @staticmethod - def _make_list_value(values=(), value_pbs=None): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1._helpers import _make_list_value_pb - - if value_pbs is not None: - return Value(list_value=ListValue(values=value_pbs)) - return Value(list_value=_make_list_value_pb(values)) - @staticmethod def _make_result_set_metadata(fields=(), transaction_id=None): - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetMetadata + from google.cloud.spanner_v1 import ResultSetMetadata + from google.cloud.spanner_v1 import StructType - metadata = ResultSetMetadata() + metadata = ResultSetMetadata(row_type=StructType(fields=[])) for field in fields: - metadata.row_type.fields.add().CopyFrom(field) + metadata.row_type.fields.append(field) if transaction_id is not None: metadata.transaction.id = transaction_id return metadata @staticmethod def _make_result_set_stats(query_plan=None, **kw): - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetStats + from google.cloud.spanner_v1 import ResultSetStats from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1._helpers import _make_value_pb @@ -123,18 +116,23 @@ def _make_result_set_stats(query_plan=None, **kw): def _make_partial_result_set( values, metadata=None, stats=None, chunked_value=False ): - from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet + from google.cloud.spanner_v1 import PartialResultSet - return PartialResultSet( - values=values, metadata=metadata, stats=stats, chunked_value=chunked_value + results = PartialResultSet( + metadata=metadata, stats=stats, chunked_value=chunked_value ) + for v in values: + results.values.append(v) + return results def test_properties_set(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), ] metadata = streamed._metadata = self._make_result_set_metadata(FIELDS) stats = streamed._stats = self._make_result_set_stats() @@ -144,87 +142,100 @@ def test_properties_set(self): def test__merge_chunk_bool(self): from google.cloud.spanner_v1.streamed import Unmergeable + from google.cloud.spanner_v1 import TypeCode iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("registered_voter", "BOOL")] + FIELDS = [self._make_scalar_field("registered_voter", TypeCode.BOOL)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(True) - chunk = self._make_value(False) + streamed._pending_chunk = True + chunk = False with self.assertRaises(Unmergeable): streamed._merge_chunk(chunk) def test__merge_chunk_int64(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) 
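The recurring edit in these test_streamed.py hunks is the field-construction change: `type` collides with the Python builtin, so the generated proto-plus message exposes the field as `type_`, and bare string codes such as "INT64" become members of the TypeCode enum. A short sketch of the two spellings (the final assert is illustrative, not taken from the patch):

    from google.cloud.spanner_v1 import StructType, Type, TypeCode

    # v1 spelling: StructType.Field(name="age", type=Type(code="INT64"))
    field = StructType.Field(name="age", type_=Type(code=TypeCode.INT64))
    assert field.type_.code == TypeCode.INT64
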
- FIELDS = [self._make_scalar_field("age", "INT64")] + FIELDS = [self._make_scalar_field("age", TypeCode.INT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(42) - chunk = self._make_value(13) + streamed._pending_chunk = 42 + chunk = 13 merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.string_value, "4213") + self.assertEqual(merged, 4213) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_float64_nan_string(self): + from google.cloud.spanner_v1 import TypeCode + from math import isnan + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] + FIELDS = [self._make_scalar_field("weight", TypeCode.FLOAT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"Na") - chunk = self._make_value(u"N") + streamed._pending_chunk = u"Na" + chunk = u"N" merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.string_value, u"NaN") + self.assertTrue(isnan(merged)) def test__merge_chunk_float64_w_empty(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] + FIELDS = [self._make_scalar_field("weight", TypeCode.FLOAT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(3.14159) - chunk = self._make_value("") + streamed._pending_chunk = 3.14159 + chunk = "" merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.number_value, 3.14159) + self.assertEqual(merged, 3.14159) def test__merge_chunk_float64_w_float64(self): from google.cloud.spanner_v1.streamed import Unmergeable + from google.cloud.spanner_v1 import TypeCode iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] + FIELDS = [self._make_scalar_field("weight", TypeCode.FLOAT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(3.14159) - chunk = self._make_value(2.71828) + streamed._pending_chunk = 3.14159 + chunk = 2.71828 with self.assertRaises(Unmergeable): streamed._merge_chunk(chunk) def test__merge_chunk_string(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("name", "STRING")] + FIELDS = [self._make_scalar_field("name", TypeCode.STRING)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"phred") - chunk = self._make_value(u"wylma") + streamed._pending_chunk = u"phred" + chunk = u"wylma" merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.string_value, u"phredwylma") + self.assertEqual(merged, u"phredwylma") self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_string_w_bytes(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("image", "BYTES")] + FIELDS = [self._make_scalar_field("image", TypeCode.BYTES)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value( + streamed._pending_chunk = ( u"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA" u"6fptVAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\n" ) - chunk = self._make_value( + chunk = ( 
u"B3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0FNUExF" u"MG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n" ) @@ -232,42 +243,47 @@ def test__merge_chunk_string_w_bytes(self): merged = streamed._merge_chunk(chunk) self.assertEqual( - merged.string_value, - u"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA6fptVAAAACXBIWXMAAAsTAAAL" - u"EwEAmpwYAAAA\nB3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0" - u"FNUExFMG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n", + merged, + b"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA6fptVAAAACXBIWXMAAAsTAAAL" + b"EwEAmpwYAAAA\nB3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0" + b"FNUExFMG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n", ) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_bool(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="BOOL")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.BOOL)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([True, True]) - chunk = self._make_list_value([False, False, False]) + streamed._pending_chunk = [True, True] + chunk = [False, False, False] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([True, True, False, False, False]) + expected = [True, True, False, False, False] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_int(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="INT64")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.INT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([0, 1, 2]) - chunk = self._make_list_value([3, 4, 5]) + streamed._pending_chunk = [0, 1, 2] + chunk = [3, 4, 5] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([0, 1, 23, 4, 5]) + expected = [0, 1, 23, 4, 5] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_float(self): + from google.cloud.spanner_v1 import TypeCode import math PI = math.pi @@ -276,175 +292,191 @@ def test__merge_chunk_array_of_float(self): LOG_10 = math.log(10) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="FLOAT64")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.FLOAT64)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([PI, SQRT_2]) - chunk = self._make_list_value(["", EULER, LOG_10]) + streamed._pending_chunk = [PI, SQRT_2] + chunk = ["", EULER, LOG_10] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([PI, SQRT_2, EULER, LOG_10]) + expected = [PI, SQRT_2, EULER, LOG_10] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string_with_empty(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)] streamed._metadata = 
self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([]) + streamed._pending_chunk = [u"A", u"B", u"C"] + chunk = [] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([u"A", u"B", u"C"]) + expected = [u"A", u"B", u"C"] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([None, u"D", u"E"]) + streamed._pending_chunk = [u"A", u"B", u"C"] + chunk = [None, u"D", u"E"] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([u"A", u"B", u"C", None, u"D", u"E"]) + expected = [u"A", u"B", u"C", None, u"D", u"E"] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_string_with_null(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] + FIELDS = [self._make_array_field("name", element_type_code=TypeCode.STRING)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([u"D", u"E"]) + streamed._pending_chunk = [u"A", u"B", u"C"] + chunk = [u"D", u"E"] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value([u"A", u"B", u"CD", u"E"]) + expected = [u"A", u"B", u"CD", u"E"] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_array_of_int(self): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - subarray_type = Type(code="ARRAY", array_element_type=Type(code="INT64")) - array_type = Type(code="ARRAY", array_element_type=subarray_type) + subarray_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.INT64) + ) + array_type = Type(code=TypeCode.ARRAY, array_element_type=subarray_type) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [StructType.Field(name="loloi", type=array_type)] + FIELDS = [StructType.Field(name="loloi", type_=array_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value( - value_pbs=[self._make_list_value([0, 1]), self._make_list_value([2])] - ) - chunk = self._make_list_value( - value_pbs=[self._make_list_value([3]), self._make_list_value([4, 5])] - ) + streamed._pending_chunk = [[0, 1], [2]] + chunk = [[3], [4, 5]] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value( - value_pbs=[ - self._make_list_value([0, 1]), - self._make_list_value([23]), - self._make_list_value([4, 5]), - ] - ) + expected = [ + [0, 1], + [23], + [4, 5], + ] + self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def 
test__merge_chunk_array_of_array_of_string(self): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type + from google.cloud.spanner_v1 import StructType + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode - subarray_type = Type(code="ARRAY", array_element_type=Type(code="STRING")) - array_type = Type(code="ARRAY", array_element_type=subarray_type) + subarray_type = Type( + code=TypeCode.ARRAY, array_element_type=Type(code=TypeCode.STRING) + ) + array_type = Type(code=TypeCode.ARRAY, array_element_type=subarray_type) iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - FIELDS = [StructType.Field(name="lolos", type=array_type)] + FIELDS = [StructType.Field(name="lolos", type_=array_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value( - value_pbs=[ - self._make_list_value([u"A", u"B"]), - self._make_list_value([u"C"]), - ] - ) - chunk = self._make_list_value( - value_pbs=[ - self._make_list_value([u"D"]), - self._make_list_value([u"E", u"F"]), - ] - ) + streamed._pending_chunk = [ + [u"A", u"B"], + [u"C"], + ] + chunk = [ + [u"D"], + [u"E", u"F"], + ] merged = streamed._merge_chunk(chunk) - expected = self._make_list_value( - value_pbs=[ - self._make_list_value([u"A", u"B"]), - self._make_list_value([u"CD"]), - self._make_list_value([u"E", u"F"]), - ] - ) + expected = [ + [u"A", u"B"], + [u"CD"], + [u"E", u"F"], + ] + self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_struct(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - struct_type = self._make_struct_type([("name", "STRING"), ("age", "INT64")]) + struct_type = self._make_struct_type( + [("name", TypeCode.STRING), ("age", TypeCode.INT64)] + ) FIELDS = [self._make_array_field("test", element_type=struct_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred "]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([u"Phlyntstone", 31]) - chunk = self._make_list_value(value_pbs=[rest]) + partial = [u"Phred "] + streamed._pending_chunk = [partial] + rest = [u"Phlyntstone", 31] + chunk = [rest] merged = streamed._merge_chunk(chunk) - struct = self._make_list_value([u"Phred Phlyntstone", 31]) - expected = self._make_list_value(value_pbs=[struct]) + struct = [u"Phred Phlyntstone", 31] + expected = [struct] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_struct_with_empty(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) - struct_type = self._make_struct_type([("name", "STRING"), ("age", "INT64")]) + struct_type = self._make_struct_type( + [("name", TypeCode.STRING), ("age", TypeCode.INT64)] + ) FIELDS = [self._make_array_field("test", element_type=struct_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred "]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([]) - chunk = self._make_list_value(value_pbs=[rest]) + partial = [u"Phred "] + streamed._pending_chunk = [partial] + rest = [] + chunk = [rest] merged = streamed._merge_chunk(chunk) - expected = 
self._make_list_value(value_pbs=[partial]) + expected = [partial] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test__merge_chunk_array_of_struct_unmergeable(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) struct_type = self._make_struct_type( - [("name", "STRING"), ("registered", "BOOL"), ("voted", "BOOL")] + [ + ("name", TypeCode.STRING), + ("registered", TypeCode.BOOL), + ("voted", TypeCode.BOOL), + ] ) FIELDS = [self._make_array_field("test", element_type=struct_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred Phlyntstone", True]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([True]) - chunk = self._make_list_value(value_pbs=[rest]) + partial = [u"Phred Phlyntstone", True] + streamed._pending_chunk = [partial] + rest = [True] + chunk = [rest] merged = streamed._merge_chunk(chunk) - struct = self._make_list_value([u"Phred Phlyntstone", True, True]) - expected = self._make_list_value(value_pbs=[struct]) + struct = [u"Phred Phlyntstone", True, True] + expected = [struct] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) @@ -456,25 +488,27 @@ def test__merge_chunk_array_of_struct_unmergeable_split(self): ) FIELDS = [self._make_array_field("test", element_type=struct_type)] streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred Phlyntstone", 1.65]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value(["brown"]) - chunk = self._make_list_value(value_pbs=[rest]) + partial = [u"Phred Phlyntstone", 1.65] + streamed._pending_chunk = [partial] + rest = ["brown"] + chunk = [rest] merged = streamed._merge_chunk(chunk) - struct = self._make_list_value([u"Phred Phlyntstone", 1.65, "brown"]) - expected = self._make_list_value(value_pbs=[struct]) + struct = [u"Phred Phlyntstone", 1.65, "brown"] + expected = [struct] self.assertEqual(merged, expected) self.assertIsNone(streamed._pending_chunk) def test_merge_values_empty_and_empty(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) streamed._current_row = [] @@ -483,46 +517,61 @@ def test_merge_values_empty_and_empty(self): self.assertEqual(streamed._current_row, []) def test_merge_values_empty_and_partial(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) + VALUES = [u"Phred Phlyntstone", "42"] BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] 
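The _merge_chunk hunks above capture the behavioral half of the migration: StreamedResultSet now buffers decoded Python values rather than protobuf Value messages, so a chunked STRING column merges by plain concatenation. A sketch under the same assumptions the tests make — an empty response iterator standing in for the gRPC stream, and direct use of the private `_metadata`/`_merge_chunk` attributes, which only these unit tests touch:

    from google.cloud.spanner_v1 import ResultSetMetadata, StructType, Type, TypeCode
    from google.cloud.spanner_v1.streamed import StreamedResultSet

    streamed = StreamedResultSet(iter(()))  # placeholder for the response stream
    streamed._metadata = ResultSetMetadata(
        row_type=StructType(
            fields=[StructType.Field(name="name", type_=Type(code=TypeCode.STRING))]
        )
    )
    streamed._pending_chunk = u"phred"
    assert streamed._merge_chunk(u"wylma") == u"phredwylma"  # str in, str out
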
streamed._merge_values(VALUES) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BARE) def test_merge_values_empty_and_filled(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) + VALUES = [u"Phred Phlyntstone", "42", True] BARE = [u"Phred Phlyntstone", 42, True] - VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] streamed._merge_values(VALUES) self.assertEqual(list(streamed), [BARE]) self.assertEqual(streamed._current_row, []) def test_merge_values_empty_and_filled_plus(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) + VALUES = [ + u"Phred Phlyntstone", + "42", + True, + u"Bharney Rhubble", + "39", + True, + u"Wylma Phlyntstone", + ] BARE = [ u"Phred Phlyntstone", 42, @@ -532,19 +581,20 @@ def test_merge_values_empty_and_filled_plus(self): True, u"Wylma Phlyntstone", ] - VALUES = [self._make_value(bare) for bare in BARE] streamed._current_row = [] streamed._merge_values(VALUES) self.assertEqual(list(streamed), [BARE[0:3], BARE[3:6]]) self.assertEqual(streamed._current_row, BARE[6:]) def test_merge_values_partial_and_empty(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [u"Phred Phlyntstone"] @@ -554,52 +604,58 @@ def test_merge_values_partial_and_empty(self): self.assertEqual(streamed._current_row, BEFORE) def test_merge_values_partial_and_partial(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [u"Phred Phlyntstone"] streamed._current_row[:] = BEFORE + TO_MERGE = ["42"] MERGED = [42] - TO_MERGE = [self._make_value(item) for item in MERGED] streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, BEFORE + MERGED) def test_merge_values_partial_and_filled(self): + from 
google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [u"Phred Phlyntstone"] streamed._current_row[:] = BEFORE + TO_MERGE = ["42", True] MERGED = [42, True] - TO_MERGE = [self._make_value(item) for item in MERGED] streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), [BEFORE + MERGED]) self.assertEqual(streamed._current_row, []) def test_merge_values_partial_and_filled_plus(self): + from google.cloud.spanner_v1 import TypeCode + iterator = _MockCancellableIterator() streamed = self._make_one(iterator) FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] streamed._metadata = self._make_result_set_metadata(FIELDS) BEFORE = [self._make_value(u"Phred Phlyntstone")] streamed._current_row[:] = BEFORE + TO_MERGE = ["42", True, u"Bharney Rhubble", "39", True, u"Wylma Phlyntstone"] MERGED = [42, True, u"Bharney Rhubble", 39, True, u"Wylma Phlyntstone"] - TO_MERGE = [self._make_value(item) for item in MERGED] VALUES = BEFORE + MERGED streamed._merge_values(TO_MERGE) self.assertEqual(list(streamed), [VALUES[0:3], VALUES[3:6]]) @@ -654,16 +710,17 @@ def test_consume_next_empty(self): streamed._consume_next() def test_consume_next_first_set_partial(self): + from google.cloud.spanner_v1 import TypeCode + TXN_ID = b"DEADBEEF" FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS, transaction_id=TXN_ID) BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES, metadata=metadata) + result_set = self._make_partial_result_set(BARE, metadata=metadata) iterator = _MockCancellableIterator(result_set) source = mock.Mock(_transaction_id=None, spec=["_transaction_id"]) streamed = self._make_one(iterator, source=source) @@ -674,11 +731,13 @@ def test_consume_next_first_set_partial(self): self.assertEqual(source._transaction_id, TXN_ID) def test_consume_next_first_set_partial_existing_txn_id(self): + from google.cloud.spanner_v1 import TypeCode + TXN_ID = b"DEADBEEF" FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS, transaction_id=b"") BARE = [u"Phred Phlyntstone", 42] @@ -694,10 +753,12 @@ def test_consume_next_first_set_partial_existing_txn_id(self): self.assertEqual(source._transaction_id, TXN_ID) def 
test_consume_next_w_partial_result(self): + from google.cloud.spanner_v1 import TypeCode + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] VALUES = [self._make_value(u"Phred ")] result_set = self._make_partial_result_set(VALUES, chunked_value=True) @@ -707,13 +768,15 @@ def test_consume_next_w_partial_result(self): streamed._consume_next() self.assertEqual(list(streamed), []) self.assertEqual(streamed._current_row, []) - self.assertEqual(streamed._pending_chunk, VALUES[0]) + self.assertEqual(streamed._pending_chunk, VALUES[0].string_value) def test_consume_next_w_pending_chunk(self): + from google.cloud.spanner_v1 import TypeCode + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] BARE = [ u"Phlyntstone", @@ -729,7 +792,7 @@ def test_consume_next_w_pending_chunk(self): iterator = _MockCancellableIterator(result_set) streamed = self._make_one(iterator) streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"Phred ") + streamed._pending_chunk = u"Phred " streamed._consume_next() self.assertEqual( list(streamed), @@ -739,10 +802,12 @@ def test_consume_next_w_pending_chunk(self): self.assertIsNone(streamed._pending_chunk) def test_consume_next_last_set(self): + from google.cloud.spanner_v1 import TypeCode + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS) stats = self._make_result_set_stats( @@ -766,14 +831,19 @@ def test___iter___empty(self): self.assertEqual(found, []) def test___iter___one_result_set_partial(self): + from google.cloud.spanner_v1 import TypeCode + from google.protobuf.struct_pb2 import Value + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS) BARE = [u"Phred Phlyntstone", 42] VALUES = [self._make_value(bare) for bare in BARE] + for val in VALUES: + self.assertIsInstance(val, Value) result_set = self._make_partial_result_set(VALUES, metadata=metadata) iterator = _MockCancellableIterator(result_set) streamed = self._make_one(iterator) @@ -784,10 +854,12 @@ def test___iter___one_result_set_partial(self): self.assertEqual(streamed.metadata, metadata) def test___iter___multiple_result_sets_filled(self): + from google.cloud.spanner_v1 import TypeCode + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", 
TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS) BARE = [ @@ -820,10 +892,12 @@ def test___iter___multiple_result_sets_filled(self): self.assertIsNone(streamed._pending_chunk) def test___iter___w_existing_rows_read(self): + from google.cloud.spanner_v1 import TypeCode + FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), + self._make_scalar_field("full_name", TypeCode.STRING), + self._make_scalar_field("age", TypeCode.INT64), + self._make_scalar_field("married", TypeCode.BOOL), ] metadata = self._make_result_set_metadata(FIELDS) ALREADY = [[u"Pebbylz Phlyntstone", 4, False], [u"Dino Rhubble", 4, False]] @@ -979,14 +1053,13 @@ def test_multiple_row_chunks_non_chunks_interleaved(self): def _generate_partial_result_sets(prs_text_pbs): - from google.protobuf.json_format import Parse - from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet + from google.cloud.spanner_v1 import PartialResultSet partial_result_sets = [] for prs_text_pb in prs_text_pbs: - prs = PartialResultSet() - partial_result_sets.append(Parse(prs_text_pb, prs)) + prs = PartialResultSet.from_json(prs_text_pb) + partial_result_sets.append(prs) return partial_result_sets @@ -1013,23 +1086,23 @@ def _normalize_float(cell): def _normalize_results(rows_data, fields): """Helper for _parse_streaming_read_acceptance_tests""" - from google.cloud.spanner_v1.proto import type_pb2 + from google.cloud.spanner_v1 import TypeCode normalized = [] for row_data in rows_data: row = [] assert len(row_data) == len(fields) for cell, field in zip(row_data, fields): - if field.type.code == type_pb2.INT64: + if field.type_.code == TypeCode.INT64: cell = int(cell) - if field.type.code == type_pb2.FLOAT64: + if field.type_.code == TypeCode.FLOAT64: cell = _normalize_float(cell) - elif field.type.code == type_pb2.BYTES: + elif field.type_.code == TypeCode.BYTES: cell = cell.encode("utf8") - elif field.type.code == type_pb2.ARRAY: - if field.type.array_element_type.code == type_pb2.INT64: + elif field.type_.code == TypeCode.ARRAY: + if field.type_.array_element_type.code == TypeCode.INT64: cell = _normalize_int_array(cell) - elif field.type.array_element_type.code == type_pb2.FLOAT64: + elif field.type_.array_element_type.code == TypeCode.FLOAT64: cell = [_normalize_float(subcell) for subcell in cell] row.append(cell) normalized.append(row) diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index e2ac7c2eec..2c3b45a664 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -15,6 +15,8 @@ import mock from tests._helpers import OpenTelemetryBase, StatusCanonicalCode +from google.cloud.spanner_v1 import Type +from google.cloud.spanner_v1 import TypeCode TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] @@ -31,7 +33,7 @@ VALUES ("Phred", "Phlyntstone", @age) """ PARAMS = {"age": 30} -PARAM_TYPES = {"age": "INT64"} +PARAM_TYPES = {"age": Type(code=TypeCode.INT64)} class TestTransaction(OpenTelemetryBase): @@ -47,9 +49,9 @@ class TestTransaction(OpenTelemetryBase): BASE_ATTRIBUTES = { "db.type": "spanner", - "db.url": "spanner.googleapis.com:443", + "db.url": "spanner.googleapis.com", "db.instance": "testing", - "net.host.name": "spanner.googleapis.com:443", + "net.host.name": "spanner.googleapis.com", } def _getTargetClass(self): @@ -63,11 +65,9 @@ def _make_one(self, session, *args, 
**kwargs): return transaction def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client + from google.cloud.spanner_v1 import SpannerClient - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) + return mock.create_autospec(SpannerClient, instance=True) def test_ctor_session_w_existing_txn(self): session = _Session() @@ -164,9 +164,7 @@ def test_begin_w_other_error(self): ) def test_begin_ok(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) + from google.cloud.spanner_v1 import Transaction as TransactionPB transaction_pb = TransactionPB(id=self.TRANSACTION_ID) database = _Database() @@ -183,7 +181,7 @@ def test_begin_ok(self): session_id, txn_options, metadata = api._begun self.assertEqual(session_id, session.name) - self.assertTrue(txn_options.HasField("read_write")) + self.assertTrue(type(txn_options).pb(txn_options).HasField("read_write")) self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) self.assertSpanAttributes( @@ -313,16 +311,14 @@ def test_commit_w_other_error(self): def _commit_helper(self, mutate=True): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse + from google.cloud.spanner_v1 import CommitResponse from google.cloud.spanner_v1.keyset import KeySet from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) keys = [[0], [1], [2]] keyset = KeySet(keys=keys) - response = CommitResponse(commit_timestamp=now_pb) + response = CommitResponse(commit_timestamp=now) database = _Database() api = database.spanner_api = _FauxSpannerAPI(_commit_response=response) session = _Session(database) @@ -400,7 +396,6 @@ def test_execute_update_w_params_wo_param_types(self): database = _Database() database.spanner_api = self._make_spanner_api() session = _Session(database) - session = _Session() transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID @@ -409,15 +404,16 @@ def test_execute_update_w_params_wo_param_types(self): def _execute_update_helper(self, count=0, query_options=None): from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( + from google.cloud.spanner_v1 import ( ResultSet, ResultSetStats, ) - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector + from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1._helpers import ( _make_value_pb, _merge_query_options, ) + from google.cloud.spanner_v1 import ExecuteSqlRequest MODE = 2 # PROFILE stats_pb = ResultSetStats(row_count_exact=1) @@ -450,15 +446,18 @@ def _execute_update_helper(self, count=0, query_options=None): expected_query_options, query_options ) - api.execute_sql.assert_called_once_with( - self.SESSION_NAME, - DML_QUERY_WITH_PARAM, + expected_request = ExecuteSqlRequest( + session=self.SESSION_NAME, + sql=DML_QUERY_WITH_PARAM, transaction=expected_transaction, params=expected_params, param_types=PARAM_TYPES, query_mode=MODE, query_options=expected_query_options, seqno=count, + ) + api.execute_sql.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -484,7 +483,7 @@ def test_execute_update_error(self): self.assertEqual(transaction._execute_sql_count, 1) def 
test_execute_update_w_query_options(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self._execute_update_helper( query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3") @@ -504,15 +503,17 @@ def test_batch_update_other_error(self): def _batch_update_helper(self, error_after=None, count=0): from google.rpc.status_pb2 import Status from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSet - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetStats - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteBatchDmlResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector + from google.cloud.spanner_v1 import param_types + from google.cloud.spanner_v1 import ResultSet + from google.cloud.spanner_v1 import ResultSetStats + from google.cloud.spanner_v1 import ExecuteBatchDmlRequest + from google.cloud.spanner_v1 import ExecuteBatchDmlResponse + from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1._helpers import _make_value_pb insert_dml = "INSERT INTO table(pkey, desc) VALUES (%pkey, %desc)" insert_params = {"pkey": 12345, "desc": "DESCRIPTION"} - insert_param_types = {"pkey": "INT64", "desc": "STRING"} + insert_param_types = {"pkey": param_types.INT64, "desc": param_types.STRING} update_dml = 'UPDATE table SET desc = desc + "-amended"' delete_dml = "DELETE FROM table WHERE desc IS NULL" @@ -558,20 +559,23 @@ def _batch_update_helper(self, error_after=None, count=0): } ) expected_statements = [ - { - "sql": insert_dml, - "params": expected_insert_params, - "param_types": insert_param_types, - }, - {"sql": update_dml}, - {"sql": delete_dml}, + ExecuteBatchDmlRequest.Statement( + sql=insert_dml, + params=expected_insert_params, + param_types=insert_param_types, + ), + ExecuteBatchDmlRequest.Statement(sql=update_dml), + ExecuteBatchDmlRequest.Statement(sql=delete_dml), ] - api.execute_batch_dml.assert_called_once_with( + expected_request = ExecuteBatchDmlRequest( session=self.SESSION_NAME, transaction=expected_transaction, statements=expected_statements, seqno=count, + ) + api.execute_batch_dml.assert_called_once_with( + request=expected_request, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -584,6 +588,9 @@ def test_batch_update_w_errors(self): self._batch_update_helper(error_after=2, count=1) def test_batch_update_error(self): + from google.cloud.spanner_v1 import Type + from google.cloud.spanner_v1 import TypeCode + database = _Database() api = database.spanner_api = self._make_spanner_api() api.execute_batch_dml.side_effect = RuntimeError() @@ -593,7 +600,10 @@ def test_batch_update_error(self): insert_dml = "INSERT INTO table(pkey, desc) VALUES (%pkey, %desc)" insert_params = {"pkey": 12345, "desc": "DESCRIPTION"} - insert_param_types = {"pkey": "INT64", "desc": "STRING"} + insert_param_types = { + "pkey": Type(code=TypeCode.INT64), + "desc": Type(code=TypeCode.STRING), + } update_dml = 'UPDATE table SET desc = desc + "-amended"' delete_dml = "DELETE FROM table WHERE desc IS NULL" @@ -610,18 +620,13 @@ def test_batch_update_error(self): def test_context_mgr_success(self): import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) + from google.cloud.spanner_v1 import CommitResponse + from google.cloud.spanner_v1 
import Transaction as TransactionPB from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp transaction_pb = TransactionPB(id=self.TRANSACTION_ID) - database = _Database() now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) + response = CommitResponse(commit_timestamp=now) database = _Database() api = database.spanner_api = _FauxSpannerAPI( _begin_transaction_response=transaction_pb, _commit_response=response @@ -644,9 +649,7 @@ def test_context_mgr_failure(self): from google.protobuf.empty_pb2 import Empty empty_pb = Empty() - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) + from google.cloud.spanner_v1 import Transaction as TransactionPB transaction_pb = TransactionPB(id=self.TRANSACTION_ID) database = _Database() @@ -675,7 +678,7 @@ def test_context_mgr_failure(self): class _Client(object): def __init__(self): - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import ExecuteSqlRequest self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") @@ -707,18 +710,18 @@ class _FauxSpannerAPI(object): def __init__(self, **kwargs): self.__dict__.update(**kwargs) - def begin_transaction(self, session, options_, metadata=None): - self._begun = (session, options_, metadata) + def begin_transaction(self, session=None, options=None, metadata=None): + self._begun = (session, options, metadata) return self._begin_transaction_response - def rollback(self, session, transaction_id, metadata=None): + def rollback(self, session=None, transaction_id=None, metadata=None): self._rolled_back = (session, transaction_id, metadata) return self._rollback_response def commit( self, - session, - mutations, + session=None, + mutations=None, transaction_id="", single_use_transaction=None, metadata=None,